Linux Audio

Check our new training course

Loading...
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * xHCI host controller driver
   4 *
   5 * Copyright (C) 2008 Intel Corp.
   6 *
   7 * Author: Sarah Sharp
   8 * Some code borrowed from the Linux EHCI driver.
   9 */
  10
  11#include <linux/usb.h>
  12#include <linux/pci.h>
  13#include <linux/slab.h>
  14#include <linux/dmapool.h>
  15#include <linux/dma-mapping.h>
  16
  17#include "xhci.h"
  18#include "xhci-trace.h"
  19#include "xhci-debugfs.h"
  20
  21/*
  22 * Allocates a generic ring segment from the ring pool, sets the dma address,
  23 * initializes the segment to zero, and sets the private next pointer to NULL.
  24 *
  25 * Section 4.11.1.1:
  26 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
  27 */
  28static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
  29					       unsigned int cycle_state,
  30					       unsigned int max_packet,
  31					       gfp_t flags)
  32{
  33	struct xhci_segment *seg;
  34	dma_addr_t	dma;
  35	int		i;
  36	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
  37
  38	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
  39	if (!seg)
  40		return NULL;
  41
  42	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
  43	if (!seg->trbs) {
  44		kfree(seg);
  45		return NULL;
  46	}
  47
  48	if (max_packet) {
  49		seg->bounce_buf = kzalloc_node(max_packet, flags,
  50					dev_to_node(dev));
  51		if (!seg->bounce_buf) {
  52			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
  53			kfree(seg);
  54			return NULL;
  55		}
  56	}
  57	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
  58	if (cycle_state == 0) {
  59		for (i = 0; i < TRBS_PER_SEGMENT; i++)
  60			seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
  61	}
  62	seg->dma = dma;
  63	seg->next = NULL;
  64
  65	return seg;
  66}
  67
  68static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
  69{
  70	if (seg->trbs) {
  71		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
  72		seg->trbs = NULL;
  73	}
  74	kfree(seg->bounce_buf);
  75	kfree(seg);
  76}
  77
  78static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
  79				struct xhci_segment *first)
  80{
  81	struct xhci_segment *seg;
  82
  83	seg = first->next;
  84	while (seg != first) {
  85		struct xhci_segment *next = seg->next;
  86		xhci_segment_free(xhci, seg);
  87		seg = next;
  88	}
  89	xhci_segment_free(xhci, first);
  90}
  91
  92/*
  93 * Make the prev segment point to the next segment.
  94 *
  95 * Change the last TRB in the prev segment to be a Link TRB which points to the
  96 * DMA address of the next segment.  The caller needs to set any Link TRB
  97 * related flags, such as End TRB, Toggle Cycle, and no snoop.
  98 */
  99static void xhci_link_segments(struct xhci_segment *prev,
 100			       struct xhci_segment *next,
 101			       enum xhci_ring_type type, bool chain_links)
 102{
 103	u32 val;
 104
 105	if (!prev || !next)
 106		return;
 107	prev->next = next;
 108	if (type != TYPE_EVENT) {
 109		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
 110			cpu_to_le64(next->dma);
 111
 112		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
 113		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
 114		val &= ~TRB_TYPE_BITMASK;
 115		val |= TRB_TYPE(TRB_LINK);
 116		if (chain_links)
 
 
 
 
 117			val |= TRB_CHAIN;
 118		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 119	}
 120}
 121
 122/*
 123 * Link the ring to the new segments.
 124 * Set Toggle Cycle for the new ring if needed.
 125 */
 126static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
 127		struct xhci_segment *first, struct xhci_segment *last,
 128		unsigned int num_segs)
 129{
 130	struct xhci_segment *next;
 131	bool chain_links;
 132
 133	if (!ring || !first || !last)
 134		return;
 135
 136	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
 137	chain_links = !!(xhci_link_trb_quirk(xhci) ||
 138			 (ring->type == TYPE_ISOC &&
 139			  (xhci->quirks & XHCI_AMD_0x96_HOST)));
 140
 141	next = ring->enq_seg->next;
 142	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
 143	xhci_link_segments(last, next, ring->type, chain_links);
 144	ring->num_segs += num_segs;
 145	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
 146
 147	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
 148		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
 149			&= ~cpu_to_le32(LINK_TOGGLE);
 150		last->trbs[TRBS_PER_SEGMENT-1].link.control
 151			|= cpu_to_le32(LINK_TOGGLE);
 152		ring->last_seg = last;
 153	}
 154}
 155
 156/*
 157 * We need a radix tree for mapping physical addresses of TRBs to which stream
 158 * ID they belong to.  We need to do this because the host controller won't tell
 159 * us which stream ring the TRB came from.  We could store the stream ID in an
 160 * event data TRB, but that doesn't help us for the cancellation case, since the
 161 * endpoint may stop before it reaches that event data TRB.
 162 *
 163 * The radix tree maps the upper portion of the TRB DMA address to a ring
 164 * segment that has the same upper portion of DMA addresses.  For example, say I
 165 * have segments of size 1KB, that are always 1KB aligned.  A segment may
 166 * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
 167 * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
 168 * pass the radix tree a key to get the right stream ID:
 169 *
 170 *	0x10c90fff >> 10 = 0x43243
 171 *	0x10c912c0 >> 10 = 0x43244
 172 *	0x10c91400 >> 10 = 0x43245
 173 *
 174 * Obviously, only those TRBs with DMA addresses that are within the segment
 175 * will make the radix tree return the stream ID for that ring.
 176 *
 177 * Caveats for the radix tree:
 178 *
 179 * The radix tree uses an unsigned long as a key pair.  On 32-bit systems, an
 180 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 181 * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
 182 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 183 * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
 184 * extended systems (where the DMA address can be bigger than 32-bits),
 185 * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
 186 */
 187static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
 188		struct xhci_ring *ring,
 189		struct xhci_segment *seg,
 190		gfp_t mem_flags)
 191{
 192	unsigned long key;
 193	int ret;
 194
 195	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
 196	/* Skip any segments that were already added. */
 197	if (radix_tree_lookup(trb_address_map, key))
 198		return 0;
 199
 200	ret = radix_tree_maybe_preload(mem_flags);
 201	if (ret)
 202		return ret;
 203	ret = radix_tree_insert(trb_address_map,
 204			key, ring);
 205	radix_tree_preload_end();
 206	return ret;
 207}
 208
 209static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
 210		struct xhci_segment *seg)
 211{
 212	unsigned long key;
 213
 214	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
 215	if (radix_tree_lookup(trb_address_map, key))
 216		radix_tree_delete(trb_address_map, key);
 217}
 218
 219static int xhci_update_stream_segment_mapping(
 220		struct radix_tree_root *trb_address_map,
 221		struct xhci_ring *ring,
 222		struct xhci_segment *first_seg,
 223		struct xhci_segment *last_seg,
 224		gfp_t mem_flags)
 225{
 226	struct xhci_segment *seg;
 227	struct xhci_segment *failed_seg;
 228	int ret;
 229
 230	if (WARN_ON_ONCE(trb_address_map == NULL))
 231		return 0;
 232
 233	seg = first_seg;
 234	do {
 235		ret = xhci_insert_segment_mapping(trb_address_map,
 236				ring, seg, mem_flags);
 237		if (ret)
 238			goto remove_streams;
 239		if (seg == last_seg)
 240			return 0;
 241		seg = seg->next;
 242	} while (seg != first_seg);
 243
 244	return 0;
 245
 246remove_streams:
 247	failed_seg = seg;
 248	seg = first_seg;
 249	do {
 250		xhci_remove_segment_mapping(trb_address_map, seg);
 251		if (seg == failed_seg)
 252			return ret;
 253		seg = seg->next;
 254	} while (seg != first_seg);
 255
 256	return ret;
 257}
 258
 259static void xhci_remove_stream_mapping(struct xhci_ring *ring)
 260{
 261	struct xhci_segment *seg;
 262
 263	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
 264		return;
 265
 266	seg = ring->first_seg;
 267	do {
 268		xhci_remove_segment_mapping(ring->trb_address_map, seg);
 269		seg = seg->next;
 270	} while (seg != ring->first_seg);
 271}
 272
 273static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
 274{
 275	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
 276			ring->first_seg, ring->last_seg, mem_flags);
 277}
 278
 279/* XXX: Do we need the hcd structure in all these functions? */
 280void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 281{
 282	if (!ring)
 283		return;
 284
 285	trace_xhci_ring_free(ring);
 286
 287	if (ring->first_seg) {
 288		if (ring->type == TYPE_STREAM)
 289			xhci_remove_stream_mapping(ring);
 290		xhci_free_segments_for_ring(xhci, ring->first_seg);
 291	}
 292
 293	kfree(ring);
 294}
 295
 296void xhci_initialize_ring_info(struct xhci_ring *ring,
 297			       unsigned int cycle_state)
 298{
 299	/* The ring is empty, so the enqueue pointer == dequeue pointer */
 300	ring->enqueue = ring->first_seg->trbs;
 301	ring->enq_seg = ring->first_seg;
 302	ring->dequeue = ring->enqueue;
 303	ring->deq_seg = ring->first_seg;
 304	/* The ring is initialized to 0. The producer must write 1 to the cycle
 305	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
 306	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
 307	 *
 308	 * New rings are initialized with cycle state equal to 1; if we are
 309	 * handling ring expansion, set the cycle state equal to the old ring.
 310	 */
 311	ring->cycle_state = cycle_state;
 312
 313	/*
 314	 * Each segment has a link TRB, and leave an extra TRB for SW
 315	 * accounting purpose
 316	 */
 317	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
 318}
 319
 320/* Allocate segments and link them for a ring */
 321static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 322		struct xhci_segment **first, struct xhci_segment **last,
 323		unsigned int num_segs, unsigned int cycle_state,
 324		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 325{
 326	struct xhci_segment *prev;
 327	bool chain_links;
 328
 329	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
 330	chain_links = !!(xhci_link_trb_quirk(xhci) ||
 331			 (type == TYPE_ISOC &&
 332			  (xhci->quirks & XHCI_AMD_0x96_HOST)));
 333
 334	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 335	if (!prev)
 336		return -ENOMEM;
 337	num_segs--;
 338
 339	*first = prev;
 340	while (num_segs > 0) {
 341		struct xhci_segment	*next;
 342
 343		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 344		if (!next) {
 345			prev = *first;
 346			while (prev) {
 347				next = prev->next;
 348				xhci_segment_free(xhci, prev);
 349				prev = next;
 350			}
 351			return -ENOMEM;
 352		}
 353		xhci_link_segments(prev, next, type, chain_links);
 354
 355		prev = next;
 356		num_segs--;
 357	}
 358	xhci_link_segments(prev, *first, type, chain_links);
 359	*last = prev;
 360
 361	return 0;
 362}
 363
 364/*
 365 * Create a new ring with zero or more segments.
 366 *
 367 * Link each segment together into a ring.
 368 * Set the end flag and the cycle toggle bit on the last segment.
 369 * See section 4.9.1 and figures 15 and 16.
 370 */
 371struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 372		unsigned int num_segs, unsigned int cycle_state,
 373		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 374{
 375	struct xhci_ring	*ring;
 376	int ret;
 377	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 378
 379	ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
 380	if (!ring)
 381		return NULL;
 382
 383	ring->num_segs = num_segs;
 384	ring->bounce_buf_len = max_packet;
 385	INIT_LIST_HEAD(&ring->td_list);
 386	ring->type = type;
 387	if (num_segs == 0)
 388		return ring;
 389
 390	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
 391			&ring->last_seg, num_segs, cycle_state, type,
 392			max_packet, flags);
 393	if (ret)
 394		goto fail;
 395
 396	/* Only event ring does not use link TRB */
 397	if (type != TYPE_EVENT) {
 398		/* See section 4.9.2.1 and 6.4.4.1 */
 399		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
 400			cpu_to_le32(LINK_TOGGLE);
 401	}
 402	xhci_initialize_ring_info(ring, cycle_state);
 403	trace_xhci_ring_alloc(ring);
 404	return ring;
 405
 406fail:
 407	kfree(ring);
 408	return NULL;
 409}
 410
 411void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
 412		struct xhci_virt_device *virt_dev,
 413		unsigned int ep_index)
 414{
 415	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
 416	virt_dev->eps[ep_index].ring = NULL;
 417}
 418
 419/*
 420 * Expand an existing ring.
 421 * Allocate a new ring which has same segment numbers and link the two rings.
 422 */
 423int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 424				unsigned int num_trbs, gfp_t flags)
 425{
 426	struct xhci_segment	*first;
 427	struct xhci_segment	*last;
 428	unsigned int		num_segs;
 429	unsigned int		num_segs_needed;
 430	int			ret;
 431
 432	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
 433				(TRBS_PER_SEGMENT - 1);
 434
 435	/* Allocate number of segments we needed, or double the ring size */
 436	num_segs = max(ring->num_segs, num_segs_needed);
 
 437
 438	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
 439			num_segs, ring->cycle_state, ring->type,
 440			ring->bounce_buf_len, flags);
 441	if (ret)
 442		return -ENOMEM;
 443
 444	if (ring->type == TYPE_STREAM)
 445		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
 446						ring, first, last, flags);
 447	if (ret) {
 448		struct xhci_segment *next;
 449		do {
 450			next = first->next;
 451			xhci_segment_free(xhci, first);
 452			if (first == last)
 453				break;
 454			first = next;
 455		} while (true);
 456		return ret;
 457	}
 458
 459	xhci_link_rings(xhci, ring, first, last, num_segs);
 460	trace_xhci_ring_expansion(ring);
 461	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
 462			"ring expansion succeed, now has %d segments",
 463			ring->num_segs);
 464
 465	return 0;
 466}
 467
 468struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
 469						    int type, gfp_t flags)
 470{
 471	struct xhci_container_ctx *ctx;
 472	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 473
 474	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
 475		return NULL;
 476
 477	ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
 478	if (!ctx)
 479		return NULL;
 480
 481	ctx->type = type;
 482	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
 483	if (type == XHCI_CTX_TYPE_INPUT)
 484		ctx->size += CTX_SIZE(xhci->hcc_params);
 485
 486	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
 487	if (!ctx->bytes) {
 488		kfree(ctx);
 489		return NULL;
 490	}
 491	return ctx;
 492}
 493
 494void xhci_free_container_ctx(struct xhci_hcd *xhci,
 495			     struct xhci_container_ctx *ctx)
 496{
 497	if (!ctx)
 498		return;
 499	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
 500	kfree(ctx);
 501}
 502
 503struct xhci_input_control_ctx *xhci_get_input_control_ctx(
 504					      struct xhci_container_ctx *ctx)
 505{
 506	if (ctx->type != XHCI_CTX_TYPE_INPUT)
 507		return NULL;
 508
 509	return (struct xhci_input_control_ctx *)ctx->bytes;
 510}
 511
 512struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
 513					struct xhci_container_ctx *ctx)
 514{
 515	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
 516		return (struct xhci_slot_ctx *)ctx->bytes;
 517
 518	return (struct xhci_slot_ctx *)
 519		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
 520}
 521
 522struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
 523				    struct xhci_container_ctx *ctx,
 524				    unsigned int ep_index)
 525{
 526	/* increment ep index by offset of start of ep ctx array */
 527	ep_index++;
 528	if (ctx->type == XHCI_CTX_TYPE_INPUT)
 529		ep_index++;
 530
 531	return (struct xhci_ep_ctx *)
 532		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
 533}
 534EXPORT_SYMBOL_GPL(xhci_get_ep_ctx);
 535
 536/***************** Streams structures manipulation *************************/
 537
 538static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
 539		unsigned int num_stream_ctxs,
 540		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
 541{
 542	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 543	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
 544
 545	if (size > MEDIUM_STREAM_ARRAY_SIZE)
 546		dma_free_coherent(dev, size,
 547				stream_ctx, dma);
 548	else if (size <= SMALL_STREAM_ARRAY_SIZE)
 549		return dma_pool_free(xhci->small_streams_pool,
 550				stream_ctx, dma);
 551	else
 552		return dma_pool_free(xhci->medium_streams_pool,
 553				stream_ctx, dma);
 554}
 555
 556/*
 557 * The stream context array for each endpoint with bulk streams enabled can
 558 * vary in size, based on:
 559 *  - how many streams the endpoint supports,
 560 *  - the maximum primary stream array size the host controller supports,
 561 *  - and how many streams the device driver asks for.
 562 *
 563 * The stream context array must be a power of 2, and can be as small as
 564 * 64 bytes or as large as 1MB.
 565 */
 566static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
 567		unsigned int num_stream_ctxs, dma_addr_t *dma,
 568		gfp_t mem_flags)
 569{
 570	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 571	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
 572
 573	if (size > MEDIUM_STREAM_ARRAY_SIZE)
 574		return dma_alloc_coherent(dev, size,
 575				dma, mem_flags);
 576	else if (size <= SMALL_STREAM_ARRAY_SIZE)
 577		return dma_pool_alloc(xhci->small_streams_pool,
 578				mem_flags, dma);
 579	else
 580		return dma_pool_alloc(xhci->medium_streams_pool,
 581				mem_flags, dma);
 582}
 583
 584struct xhci_ring *xhci_dma_to_transfer_ring(
 585		struct xhci_virt_ep *ep,
 586		u64 address)
 587{
 588	if (ep->ep_state & EP_HAS_STREAMS)
 589		return radix_tree_lookup(&ep->stream_info->trb_address_map,
 590				address >> TRB_SEGMENT_SHIFT);
 591	return ep->ring;
 592}
 593
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 594/*
 595 * Change an endpoint's internal structure so it supports stream IDs.  The
 596 * number of requested streams includes stream 0, which cannot be used by device
 597 * drivers.
 598 *
 599 * The number of stream contexts in the stream context array may be bigger than
 600 * the number of streams the driver wants to use.  This is because the number of
 601 * stream context array entries must be a power of two.
 602 */
 603struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 604		unsigned int num_stream_ctxs,
 605		unsigned int num_streams,
 606		unsigned int max_packet, gfp_t mem_flags)
 607{
 608	struct xhci_stream_info *stream_info;
 609	u32 cur_stream;
 610	struct xhci_ring *cur_ring;
 611	u64 addr;
 612	int ret;
 613	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 614
 615	xhci_dbg(xhci, "Allocating %u streams and %u "
 616			"stream context array entries.\n",
 617			num_streams, num_stream_ctxs);
 618	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
 619		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
 620		return NULL;
 621	}
 622	xhci->cmd_ring_reserved_trbs++;
 623
 624	stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
 625			dev_to_node(dev));
 626	if (!stream_info)
 627		goto cleanup_trbs;
 628
 629	stream_info->num_streams = num_streams;
 630	stream_info->num_stream_ctxs = num_stream_ctxs;
 631
 632	/* Initialize the array of virtual pointers to stream rings. */
 633	stream_info->stream_rings = kcalloc_node(
 634			num_streams, sizeof(struct xhci_ring *), mem_flags,
 635			dev_to_node(dev));
 636	if (!stream_info->stream_rings)
 637		goto cleanup_info;
 638
 639	/* Initialize the array of DMA addresses for stream rings for the HW. */
 640	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
 641			num_stream_ctxs, &stream_info->ctx_array_dma,
 642			mem_flags);
 643	if (!stream_info->stream_ctx_array)
 644		goto cleanup_ring_array;
 645	memset(stream_info->stream_ctx_array, 0,
 646			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
 647
 648	/* Allocate everything needed to free the stream rings later */
 649	stream_info->free_streams_command =
 650		xhci_alloc_command_with_ctx(xhci, true, mem_flags);
 651	if (!stream_info->free_streams_command)
 652		goto cleanup_ctx;
 653
 654	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
 655
 656	/* Allocate rings for all the streams that the driver will use,
 657	 * and add their segment DMA addresses to the radix tree.
 658	 * Stream 0 is reserved.
 659	 */
 660
 661	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 662		stream_info->stream_rings[cur_stream] =
 663			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
 664					mem_flags);
 665		cur_ring = stream_info->stream_rings[cur_stream];
 666		if (!cur_ring)
 667			goto cleanup_rings;
 668		cur_ring->stream_id = cur_stream;
 669		cur_ring->trb_address_map = &stream_info->trb_address_map;
 670		/* Set deq ptr, cycle bit, and stream context type */
 671		addr = cur_ring->first_seg->dma |
 672			SCT_FOR_CTX(SCT_PRI_TR) |
 673			cur_ring->cycle_state;
 674		stream_info->stream_ctx_array[cur_stream].stream_ring =
 675			cpu_to_le64(addr);
 676		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
 677				cur_stream, (unsigned long long) addr);
 678
 679		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
 680		if (ret) {
 681			xhci_ring_free(xhci, cur_ring);
 682			stream_info->stream_rings[cur_stream] = NULL;
 683			goto cleanup_rings;
 684		}
 685	}
 686	/* Leave the other unused stream ring pointers in the stream context
 687	 * array initialized to zero.  This will cause the xHC to give us an
 688	 * error if the device asks for a stream ID we don't have setup (if it
 689	 * was any other way, the host controller would assume the ring is
 690	 * "empty" and wait forever for data to be queued to that stream ID).
 691	 */
 692
 693	return stream_info;
 694
 695cleanup_rings:
 696	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 697		cur_ring = stream_info->stream_rings[cur_stream];
 698		if (cur_ring) {
 699			xhci_ring_free(xhci, cur_ring);
 700			stream_info->stream_rings[cur_stream] = NULL;
 701		}
 702	}
 703	xhci_free_command(xhci, stream_info->free_streams_command);
 704cleanup_ctx:
 705	xhci_free_stream_ctx(xhci,
 706		stream_info->num_stream_ctxs,
 707		stream_info->stream_ctx_array,
 708		stream_info->ctx_array_dma);
 709cleanup_ring_array:
 710	kfree(stream_info->stream_rings);
 711cleanup_info:
 712	kfree(stream_info);
 713cleanup_trbs:
 714	xhci->cmd_ring_reserved_trbs--;
 715	return NULL;
 716}
 717/*
 718 * Sets the MaxPStreams field and the Linear Stream Array field.
 719 * Sets the dequeue pointer to the stream context array.
 720 */
 721void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
 722		struct xhci_ep_ctx *ep_ctx,
 723		struct xhci_stream_info *stream_info)
 724{
 725	u32 max_primary_streams;
 726	/* MaxPStreams is the number of stream context array entries, not the
 727	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
 728	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
 729	 */
 730	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
 731	xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
 732			"Setting number of stream ctx array entries to %u",
 733			1 << (max_primary_streams + 1));
 734	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
 735	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
 736				       | EP_HAS_LSA);
 737	ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
 738}
 739
 740/*
 741 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 742 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 743 * not at the beginning of the ring).
 744 */
 745void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
 746		struct xhci_virt_ep *ep)
 747{
 748	dma_addr_t addr;
 749	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
 750	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
 751	ep_ctx->deq  = cpu_to_le64(addr | ep->ring->cycle_state);
 752}
 753
 754/* Frees all stream contexts associated with the endpoint,
 755 *
 756 * Caller should fix the endpoint context streams fields.
 757 */
 758void xhci_free_stream_info(struct xhci_hcd *xhci,
 759		struct xhci_stream_info *stream_info)
 760{
 761	int cur_stream;
 762	struct xhci_ring *cur_ring;
 763
 764	if (!stream_info)
 765		return;
 766
 767	for (cur_stream = 1; cur_stream < stream_info->num_streams;
 768			cur_stream++) {
 769		cur_ring = stream_info->stream_rings[cur_stream];
 770		if (cur_ring) {
 771			xhci_ring_free(xhci, cur_ring);
 772			stream_info->stream_rings[cur_stream] = NULL;
 773		}
 774	}
 775	xhci_free_command(xhci, stream_info->free_streams_command);
 776	xhci->cmd_ring_reserved_trbs--;
 777	if (stream_info->stream_ctx_array)
 778		xhci_free_stream_ctx(xhci,
 779				stream_info->num_stream_ctxs,
 780				stream_info->stream_ctx_array,
 781				stream_info->ctx_array_dma);
 782
 783	kfree(stream_info->stream_rings);
 784	kfree(stream_info);
 785}
 786
 787
 788/***************** Device context manipulation *************************/
 789
 
 
 
 
 
 
 
 
 790static void xhci_free_tt_info(struct xhci_hcd *xhci,
 791		struct xhci_virt_device *virt_dev,
 792		int slot_id)
 793{
 794	struct list_head *tt_list_head;
 795	struct xhci_tt_bw_info *tt_info, *next;
 796	bool slot_found = false;
 797
 798	/* If the device never made it past the Set Address stage,
 799	 * it may not have the real_port set correctly.
 800	 */
 801	if (virt_dev->real_port == 0 ||
 802			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
 803		xhci_dbg(xhci, "Bad real port.\n");
 804		return;
 805	}
 806
 807	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
 808	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
 809		/* Multi-TT hubs will have more than one entry */
 810		if (tt_info->slot_id == slot_id) {
 811			slot_found = true;
 812			list_del(&tt_info->tt_list);
 813			kfree(tt_info);
 814		} else if (slot_found) {
 815			break;
 816		}
 817	}
 818}
 819
 820int xhci_alloc_tt_info(struct xhci_hcd *xhci,
 821		struct xhci_virt_device *virt_dev,
 822		struct usb_device *hdev,
 823		struct usb_tt *tt, gfp_t mem_flags)
 824{
 825	struct xhci_tt_bw_info		*tt_info;
 826	unsigned int			num_ports;
 827	int				i, j;
 828	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 829
 830	if (!tt->multi)
 831		num_ports = 1;
 832	else
 833		num_ports = hdev->maxchild;
 834
 835	for (i = 0; i < num_ports; i++, tt_info++) {
 836		struct xhci_interval_bw_table *bw_table;
 837
 838		tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
 839				dev_to_node(dev));
 840		if (!tt_info)
 841			goto free_tts;
 842		INIT_LIST_HEAD(&tt_info->tt_list);
 843		list_add(&tt_info->tt_list,
 844				&xhci->rh_bw[virt_dev->real_port - 1].tts);
 845		tt_info->slot_id = virt_dev->udev->slot_id;
 846		if (tt->multi)
 847			tt_info->ttport = i+1;
 848		bw_table = &tt_info->bw_table;
 849		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
 850			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
 851	}
 852	return 0;
 853
 854free_tts:
 855	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
 856	return -ENOMEM;
 857}
 858
 859
 860/* All the xhci_tds in the ring's TD list should be freed at this point.
 861 * Should be called with xhci->lock held if there is any chance the TT lists
 862 * will be manipulated by the configure endpoint, allocate device, or update
 863 * hub functions while this function is removing the TT entries from the list.
 864 */
 865void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 866{
 867	struct xhci_virt_device *dev;
 868	int i;
 869	int old_active_eps = 0;
 870
 871	/* Slot ID 0 is reserved */
 872	if (slot_id == 0 || !xhci->devs[slot_id])
 873		return;
 874
 875	dev = xhci->devs[slot_id];
 876
 877	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
 878	if (!dev)
 879		return;
 880
 881	trace_xhci_free_virt_device(dev);
 882
 883	if (dev->tt_info)
 884		old_active_eps = dev->tt_info->active_eps;
 885
 886	for (i = 0; i < 31; i++) {
 887		if (dev->eps[i].ring)
 888			xhci_ring_free(xhci, dev->eps[i].ring);
 889		if (dev->eps[i].stream_info)
 890			xhci_free_stream_info(xhci,
 891					dev->eps[i].stream_info);
 892		/*
 893		 * Endpoints are normally deleted from the bandwidth list when
 894		 * endpoints are dropped, before device is freed.
 895		 * If host is dying or being removed then endpoints aren't
 896		 * dropped cleanly, so delete the endpoint from list here.
 897		 * Only applicable for hosts with software bandwidth checking.
 898		 */
 899
 900		if (!list_empty(&dev->eps[i].bw_endpoint_list)) {
 901			list_del_init(&dev->eps[i].bw_endpoint_list);
 902			xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n",
 903				 slot_id, i);
 904		}
 905	}
 906	/* If this is a hub, free the TT(s) from the TT list */
 907	xhci_free_tt_info(xhci, dev, slot_id);
 908	/* If necessary, update the number of active TTs on this root port */
 909	xhci_update_tt_active_eps(xhci, dev, old_active_eps);
 910
 911	if (dev->in_ctx)
 912		xhci_free_container_ctx(xhci, dev->in_ctx);
 913	if (dev->out_ctx)
 914		xhci_free_container_ctx(xhci, dev->out_ctx);
 915
 916	if (dev->udev && dev->udev->slot_id)
 917		dev->udev->slot_id = 0;
 918	kfree(xhci->devs[slot_id]);
 919	xhci->devs[slot_id] = NULL;
 920}
 921
 922/*
 923 * Free a virt_device structure.
 924 * If the virt_device added a tt_info (a hub) and has children pointing to
 925 * that tt_info, then free the child first. Recursive.
 926 * We can't rely on udev at this point to find child-parent relationships.
 927 */
 928static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
 929{
 930	struct xhci_virt_device *vdev;
 931	struct list_head *tt_list_head;
 932	struct xhci_tt_bw_info *tt_info, *next;
 933	int i;
 934
 935	vdev = xhci->devs[slot_id];
 936	if (!vdev)
 937		return;
 938
 939	if (vdev->real_port == 0 ||
 940			vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
 941		xhci_dbg(xhci, "Bad vdev->real_port.\n");
 942		goto out;
 943	}
 944
 945	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
 946	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
 947		/* is this a hub device that added a tt_info to the tts list */
 948		if (tt_info->slot_id == slot_id) {
 949			/* are any devices using this tt_info? */
 950			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
 951				vdev = xhci->devs[i];
 952				if (vdev && (vdev->tt_info == tt_info))
 953					xhci_free_virt_devices_depth_first(
 954						xhci, i);
 955			}
 956		}
 957	}
 958out:
 959	/* we are now at a leaf device */
 960	xhci_debugfs_remove_slot(xhci, slot_id);
 961	xhci_free_virt_device(xhci, slot_id);
 962}
 963
 964int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 965		struct usb_device *udev, gfp_t flags)
 966{
 967	struct xhci_virt_device *dev;
 968	int i;
 969
 970	/* Slot ID 0 is reserved */
 971	if (slot_id == 0 || xhci->devs[slot_id]) {
 972		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
 973		return 0;
 974	}
 975
 976	dev = kzalloc(sizeof(*dev), flags);
 977	if (!dev)
 978		return 0;
 979
 980	dev->slot_id = slot_id;
 981
 982	/* Allocate the (output) device context that will be used in the HC. */
 983	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
 984	if (!dev->out_ctx)
 985		goto fail;
 986
 987	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
 988			(unsigned long long)dev->out_ctx->dma);
 989
 990	/* Allocate the (input) device context for address device command */
 991	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
 992	if (!dev->in_ctx)
 993		goto fail;
 994
 995	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
 996			(unsigned long long)dev->in_ctx->dma);
 997
 998	/* Initialize the cancellation and bandwidth list for each ep */
 999	for (i = 0; i < 31; i++) {
1000		dev->eps[i].ep_index = i;
1001		dev->eps[i].vdev = dev;
1002		dev->eps[i].xhci = xhci;
1003		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
1004		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
1005	}
1006
1007	/* Allocate endpoint 0 ring */
1008	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
1009	if (!dev->eps[0].ring)
1010		goto fail;
1011
1012	dev->udev = udev;
1013
1014	/* Point to output device context in dcbaa. */
1015	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
1016	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
1017		 slot_id,
1018		 &xhci->dcbaa->dev_context_ptrs[slot_id],
1019		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
1020
1021	trace_xhci_alloc_virt_device(dev);
1022
1023	xhci->devs[slot_id] = dev;
1024
1025	return 1;
1026fail:
1027
1028	if (dev->in_ctx)
1029		xhci_free_container_ctx(xhci, dev->in_ctx);
1030	if (dev->out_ctx)
1031		xhci_free_container_ctx(xhci, dev->out_ctx);
1032	kfree(dev);
1033
1034	return 0;
1035}
1036
1037void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
1038		struct usb_device *udev)
1039{
1040	struct xhci_virt_device *virt_dev;
1041	struct xhci_ep_ctx	*ep0_ctx;
1042	struct xhci_ring	*ep_ring;
1043
1044	virt_dev = xhci->devs[udev->slot_id];
1045	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
1046	ep_ring = virt_dev->eps[0].ring;
1047	/*
1048	 * FIXME we don't keep track of the dequeue pointer very well after a
1049	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
1050	 * host to our enqueue pointer.  This should only be called after a
1051	 * configured device has reset, so all control transfers should have
1052	 * been completed or cancelled before the reset.
1053	 */
1054	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
1055							ep_ring->enqueue)
1056				   | ep_ring->cycle_state);
1057}
1058
1059/*
1060 * The xHCI roothub may have ports of differing speeds in any order in the port
1061 * status registers.
1062 *
1063 * The xHCI hardware wants to know the roothub port number that the USB device
1064 * is attached to (or the roothub port its ancestor hub is attached to).  All we
1065 * know is the index of that port under either the USB 2.0 or the USB 3.0
1066 * roothub, but that doesn't give us the real index into the HW port status
1067 * registers. Call xhci_find_raw_port_number() to get real index.
1068 */
1069static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
1070		struct usb_device *udev)
1071{
1072	struct usb_device *top_dev;
1073	struct usb_hcd *hcd;
1074
1075	if (udev->speed >= USB_SPEED_SUPER)
1076		hcd = xhci_get_usb3_hcd(xhci);
1077	else
1078		hcd = xhci->main_hcd;
1079
1080	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1081			top_dev = top_dev->parent)
1082		/* Found device below root hub */;
1083
1084	return	xhci_find_raw_port_number(hcd, top_dev->portnum);
1085}
1086
1087/* Setup an xHCI virtual device for a Set Address command */
1088int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
1089{
1090	struct xhci_virt_device *dev;
1091	struct xhci_ep_ctx	*ep0_ctx;
1092	struct xhci_slot_ctx    *slot_ctx;
1093	u32			port_num;
1094	u32			max_packets;
1095	struct usb_device *top_dev;
1096
1097	dev = xhci->devs[udev->slot_id];
1098	/* Slot ID 0 is reserved */
1099	if (udev->slot_id == 0 || !dev) {
1100		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
1101				udev->slot_id);
1102		return -EINVAL;
1103	}
1104	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
1105	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
1106
1107	/* 3) Only the control endpoint is valid - one endpoint context */
1108	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
1109	switch (udev->speed) {
1110	case USB_SPEED_SUPER_PLUS:
1111		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
1112		max_packets = MAX_PACKET(512);
1113		break;
1114	case USB_SPEED_SUPER:
1115		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
1116		max_packets = MAX_PACKET(512);
1117		break;
1118	case USB_SPEED_HIGH:
1119		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
1120		max_packets = MAX_PACKET(64);
1121		break;
1122	/* USB core guesses at a 64-byte max packet first for FS devices */
1123	case USB_SPEED_FULL:
1124		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
1125		max_packets = MAX_PACKET(64);
1126		break;
1127	case USB_SPEED_LOW:
1128		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
1129		max_packets = MAX_PACKET(8);
1130		break;
1131	case USB_SPEED_WIRELESS:
1132		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
1133		return -EINVAL;
 
1134	default:
1135		/* Speed was set earlier, this shouldn't happen. */
1136		return -EINVAL;
1137	}
1138	/* Find the root hub port this device is under */
1139	port_num = xhci_find_real_port_number(xhci, udev);
1140	if (!port_num)
1141		return -EINVAL;
1142	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
1143	/* Set the port number in the virtual_device to the faked port number */
1144	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1145			top_dev = top_dev->parent)
1146		/* Found device below root hub */;
1147	dev->fake_port = top_dev->portnum;
1148	dev->real_port = port_num;
1149	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
1150	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
1151
1152	/* Find the right bandwidth table that this device will be a part of.
1153	 * If this is a full speed device attached directly to a root port (or a
1154	 * decendent of one), it counts as a primary bandwidth domain, not a
1155	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
1156	 * will never be created for the HS root hub.
1157	 */
1158	if (!udev->tt || !udev->tt->hub->parent) {
1159		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
1160	} else {
1161		struct xhci_root_port_bw_info *rh_bw;
1162		struct xhci_tt_bw_info *tt_bw;
1163
1164		rh_bw = &xhci->rh_bw[port_num - 1];
1165		/* Find the right TT. */
1166		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
1167			if (tt_bw->slot_id != udev->tt->hub->slot_id)
1168				continue;
1169
1170			if (!dev->udev->tt->multi ||
1171					(udev->tt->multi &&
1172					 tt_bw->ttport == dev->udev->ttport)) {
1173				dev->bw_table = &tt_bw->bw_table;
1174				dev->tt_info = tt_bw;
1175				break;
1176			}
1177		}
1178		if (!dev->tt_info)
1179			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
1180	}
1181
1182	/* Is this a LS/FS device under an external HS hub? */
1183	if (udev->tt && udev->tt->hub->parent) {
1184		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
1185						(udev->ttport << 8));
1186		if (udev->tt->multi)
1187			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
1188	}
1189	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
1190	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
1191
1192	/* Step 4 - ring already allocated */
1193	/* Step 5 */
1194	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
1195
1196	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
1197	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
1198					 max_packets);
1199
1200	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
1201				   dev->eps[0].ring->cycle_state);
1202
1203	trace_xhci_setup_addressable_virt_device(dev);
1204
1205	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
1206
1207	return 0;
1208}
1209
1210/*
1211 * Convert interval expressed as 2^(bInterval - 1) == interval into
1212 * straight exponent value 2^n == interval.
1213 *
1214 */
1215static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1216		struct usb_host_endpoint *ep)
1217{
1218	unsigned int interval;
1219
1220	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
1221	if (interval != ep->desc.bInterval - 1)
1222		dev_warn(&udev->dev,
1223			 "ep %#x - rounding interval to %d %sframes\n",
1224			 ep->desc.bEndpointAddress,
1225			 1 << interval,
1226			 udev->speed == USB_SPEED_FULL ? "" : "micro");
1227
1228	if (udev->speed == USB_SPEED_FULL) {
1229		/*
1230		 * Full speed isoc endpoints specify interval in frames,
1231		 * not microframes. We are using microframes everywhere,
1232		 * so adjust accordingly.
1233		 */
1234		interval += 3;	/* 1 frame = 2^3 uframes */
1235	}
1236
1237	return interval;
1238}
1239
1240/*
1241 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
1242 * microframes, rounded down to nearest power of 2.
1243 */
1244static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1245		struct usb_host_endpoint *ep, unsigned int desc_interval,
1246		unsigned int min_exponent, unsigned int max_exponent)
1247{
1248	unsigned int interval;
1249
1250	interval = fls(desc_interval) - 1;
1251	interval = clamp_val(interval, min_exponent, max_exponent);
1252	if ((1 << interval) != desc_interval)
1253		dev_dbg(&udev->dev,
1254			 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1255			 ep->desc.bEndpointAddress,
1256			 1 << interval,
1257			 desc_interval);
1258
1259	return interval;
1260}
1261
1262static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1263		struct usb_host_endpoint *ep)
1264{
1265	if (ep->desc.bInterval == 0)
1266		return 0;
1267	return xhci_microframes_to_exponent(udev, ep,
1268			ep->desc.bInterval, 0, 15);
1269}
1270
1271
1272static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1273		struct usb_host_endpoint *ep)
1274{
1275	return xhci_microframes_to_exponent(udev, ep,
1276			ep->desc.bInterval * 8, 3, 10);
1277}
1278
1279/* Return the polling or NAK interval.
1280 *
1281 * The polling interval is expressed in "microframes".  If xHCI's Interval field
1282 * is set to N, it will service the endpoint every 2^(Interval)*125us.
1283 *
1284 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
1285 * is set to 0.
1286 */
1287static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1288		struct usb_host_endpoint *ep)
1289{
1290	unsigned int interval = 0;
1291
1292	switch (udev->speed) {
1293	case USB_SPEED_HIGH:
1294		/* Max NAK rate */
1295		if (usb_endpoint_xfer_control(&ep->desc) ||
1296		    usb_endpoint_xfer_bulk(&ep->desc)) {
1297			interval = xhci_parse_microframe_interval(udev, ep);
1298			break;
1299		}
1300		fallthrough;	/* SS and HS isoc/int have same decoding */
1301
1302	case USB_SPEED_SUPER_PLUS:
1303	case USB_SPEED_SUPER:
1304		if (usb_endpoint_xfer_int(&ep->desc) ||
1305		    usb_endpoint_xfer_isoc(&ep->desc)) {
1306			interval = xhci_parse_exponent_interval(udev, ep);
1307		}
1308		break;
1309
1310	case USB_SPEED_FULL:
1311		if (usb_endpoint_xfer_isoc(&ep->desc)) {
1312			interval = xhci_parse_exponent_interval(udev, ep);
1313			break;
1314		}
1315		/*
1316		 * Fall through for interrupt endpoint interval decoding
1317		 * since it uses the same rules as low speed interrupt
1318		 * endpoints.
1319		 */
1320		fallthrough;
1321
1322	case USB_SPEED_LOW:
1323		if (usb_endpoint_xfer_int(&ep->desc) ||
1324		    usb_endpoint_xfer_isoc(&ep->desc)) {
1325
1326			interval = xhci_parse_frame_interval(udev, ep);
1327		}
1328		break;
1329
1330	default:
1331		BUG();
1332	}
1333	return interval;
1334}
1335
1336/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
1337 * High speed endpoint descriptors can define "the number of additional
1338 * transaction opportunities per microframe", but that goes in the Max Burst
1339 * endpoint context field.
1340 */
1341static u32 xhci_get_endpoint_mult(struct usb_device *udev,
1342		struct usb_host_endpoint *ep)
1343{
1344	if (udev->speed < USB_SPEED_SUPER ||
1345			!usb_endpoint_xfer_isoc(&ep->desc))
1346		return 0;
1347	return ep->ss_ep_comp.bmAttributes;
1348}
1349
1350static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
1351				       struct usb_host_endpoint *ep)
1352{
1353	/* Super speed and Plus have max burst in ep companion desc */
1354	if (udev->speed >= USB_SPEED_SUPER)
1355		return ep->ss_ep_comp.bMaxBurst;
1356
1357	if (udev->speed == USB_SPEED_HIGH &&
1358	    (usb_endpoint_xfer_isoc(&ep->desc) ||
1359	     usb_endpoint_xfer_int(&ep->desc)))
1360		return usb_endpoint_maxp_mult(&ep->desc) - 1;
1361
1362	return 0;
1363}
1364
1365static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
1366{
1367	int in;
1368
1369	in = usb_endpoint_dir_in(&ep->desc);
1370
1371	switch (usb_endpoint_type(&ep->desc)) {
1372	case USB_ENDPOINT_XFER_CONTROL:
1373		return CTRL_EP;
1374	case USB_ENDPOINT_XFER_BULK:
1375		return in ? BULK_IN_EP : BULK_OUT_EP;
1376	case USB_ENDPOINT_XFER_ISOC:
1377		return in ? ISOC_IN_EP : ISOC_OUT_EP;
1378	case USB_ENDPOINT_XFER_INT:
1379		return in ? INT_IN_EP : INT_OUT_EP;
1380	}
1381	return 0;
1382}
1383
1384/* Return the maximum endpoint service interval time (ESIT) payload.
1385 * Basically, this is the maxpacket size, multiplied by the burst size
1386 * and mult size.
1387 */
1388static u32 xhci_get_max_esit_payload(struct usb_device *udev,
1389		struct usb_host_endpoint *ep)
1390{
1391	int max_burst;
1392	int max_packet;
1393
1394	/* Only applies for interrupt or isochronous endpoints */
1395	if (usb_endpoint_xfer_control(&ep->desc) ||
1396			usb_endpoint_xfer_bulk(&ep->desc))
1397		return 0;
1398
1399	/* SuperSpeedPlus Isoc ep sending over 48k per esit */
1400	if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
1401	    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
1402		return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
1403	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
1404	else if (udev->speed >= USB_SPEED_SUPER)
1405		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
1406
1407	max_packet = usb_endpoint_maxp(&ep->desc);
1408	max_burst = usb_endpoint_maxp_mult(&ep->desc);
1409	/* A 0 in max burst means 1 transfer per ESIT */
1410	return max_packet * max_burst;
1411}
1412
1413/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
1414 * Drivers will have to call usb_alloc_streams() to do that.
1415 */
1416int xhci_endpoint_init(struct xhci_hcd *xhci,
1417		struct xhci_virt_device *virt_dev,
1418		struct usb_device *udev,
1419		struct usb_host_endpoint *ep,
1420		gfp_t mem_flags)
1421{
1422	unsigned int ep_index;
1423	struct xhci_ep_ctx *ep_ctx;
1424	struct xhci_ring *ep_ring;
1425	unsigned int max_packet;
1426	enum xhci_ring_type ring_type;
1427	u32 max_esit_payload;
1428	u32 endpoint_type;
1429	unsigned int max_burst;
1430	unsigned int interval;
1431	unsigned int mult;
1432	unsigned int avg_trb_len;
1433	unsigned int err_count = 0;
1434
1435	ep_index = xhci_get_endpoint_index(&ep->desc);
1436	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1437
1438	endpoint_type = xhci_get_endpoint_type(ep);
1439	if (!endpoint_type)
1440		return -EINVAL;
1441
1442	ring_type = usb_endpoint_type(&ep->desc);
1443
1444	/*
1445	 * Get values to fill the endpoint context, mostly from ep descriptor.
1446	 * The average TRB buffer lengt for bulk endpoints is unclear as we
1447	 * have no clue on scatter gather list entry size. For Isoc and Int,
1448	 * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
1449	 */
1450	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
1451	interval = xhci_get_endpoint_interval(udev, ep);
1452
1453	/* Periodic endpoint bInterval limit quirk */
1454	if (usb_endpoint_xfer_int(&ep->desc) ||
1455	    usb_endpoint_xfer_isoc(&ep->desc)) {
1456		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
1457		    udev->speed >= USB_SPEED_HIGH &&
1458		    interval >= 7) {
1459			interval = 6;
1460		}
1461	}
1462
1463	mult = xhci_get_endpoint_mult(udev, ep);
1464	max_packet = usb_endpoint_maxp(&ep->desc);
1465	max_burst = xhci_get_endpoint_max_burst(udev, ep);
1466	avg_trb_len = max_esit_payload;
1467
1468	/* FIXME dig Mult and streams info out of ep companion desc */
1469
1470	/* Allow 3 retries for everything but isoc, set CErr = 3 */
1471	if (!usb_endpoint_xfer_isoc(&ep->desc))
1472		err_count = 3;
1473	/* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
1474	if (usb_endpoint_xfer_bulk(&ep->desc)) {
1475		if (udev->speed == USB_SPEED_HIGH)
1476			max_packet = 512;
1477		if (udev->speed == USB_SPEED_FULL) {
1478			max_packet = rounddown_pow_of_two(max_packet);
1479			max_packet = clamp_val(max_packet, 8, 64);
1480		}
1481	}
1482	/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
1483	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
1484		avg_trb_len = 8;
1485	/* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
1486	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
1487		mult = 0;
1488
1489	/* Set up the endpoint ring */
1490	virt_dev->eps[ep_index].new_ring =
1491		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
1492	if (!virt_dev->eps[ep_index].new_ring)
1493		return -ENOMEM;
1494
1495	virt_dev->eps[ep_index].skip = false;
1496	ep_ring = virt_dev->eps[ep_index].new_ring;
1497
1498	/* Fill the endpoint context */
1499	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
1500				      EP_INTERVAL(interval) |
1501				      EP_MULT(mult));
1502	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
1503				       MAX_PACKET(max_packet) |
1504				       MAX_BURST(max_burst) |
1505				       ERROR_COUNT(err_count));
1506	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
1507				  ep_ring->cycle_state);
1508
1509	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
1510				      EP_AVG_TRB_LENGTH(avg_trb_len));
1511
1512	return 0;
1513}
1514
1515void xhci_endpoint_zero(struct xhci_hcd *xhci,
1516		struct xhci_virt_device *virt_dev,
1517		struct usb_host_endpoint *ep)
1518{
1519	unsigned int ep_index;
1520	struct xhci_ep_ctx *ep_ctx;
1521
1522	ep_index = xhci_get_endpoint_index(&ep->desc);
1523	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1524
1525	ep_ctx->ep_info = 0;
1526	ep_ctx->ep_info2 = 0;
1527	ep_ctx->deq = 0;
1528	ep_ctx->tx_info = 0;
1529	/* Don't free the endpoint ring until the set interface or configuration
1530	 * request succeeds.
1531	 */
1532}
1533
1534void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
1535{
1536	bw_info->ep_interval = 0;
1537	bw_info->mult = 0;
1538	bw_info->num_packets = 0;
1539	bw_info->max_packet_size = 0;
1540	bw_info->type = 0;
1541	bw_info->max_esit_payload = 0;
1542}
1543
1544void xhci_update_bw_info(struct xhci_hcd *xhci,
1545		struct xhci_container_ctx *in_ctx,
1546		struct xhci_input_control_ctx *ctrl_ctx,
1547		struct xhci_virt_device *virt_dev)
1548{
1549	struct xhci_bw_info *bw_info;
1550	struct xhci_ep_ctx *ep_ctx;
1551	unsigned int ep_type;
1552	int i;
1553
1554	for (i = 1; i < 31; i++) {
1555		bw_info = &virt_dev->eps[i].bw_info;
1556
1557		/* We can't tell what endpoint type is being dropped, but
1558		 * unconditionally clearing the bandwidth info for non-periodic
1559		 * endpoints should be harmless because the info will never be
1560		 * set in the first place.
1561		 */
1562		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1563			/* Dropped endpoint */
1564			xhci_clear_endpoint_bw_info(bw_info);
1565			continue;
1566		}
1567
1568		if (EP_IS_ADDED(ctrl_ctx, i)) {
1569			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1570			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1571
1572			/* Ignore non-periodic endpoints */
1573			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1574					ep_type != ISOC_IN_EP &&
1575					ep_type != INT_IN_EP)
1576				continue;
1577
1578			/* Added or changed endpoint */
1579			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1580					le32_to_cpu(ep_ctx->ep_info));
1581			/* Number of packets and mult are zero-based in the
1582			 * input context, but we want one-based for the
1583			 * interval table.
1584			 */
1585			bw_info->mult = CTX_TO_EP_MULT(
1586					le32_to_cpu(ep_ctx->ep_info)) + 1;
1587			bw_info->num_packets = CTX_TO_MAX_BURST(
1588					le32_to_cpu(ep_ctx->ep_info2)) + 1;
1589			bw_info->max_packet_size = MAX_PACKET_DECODED(
1590					le32_to_cpu(ep_ctx->ep_info2));
1591			bw_info->type = ep_type;
1592			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1593					le32_to_cpu(ep_ctx->tx_info));
1594		}
1595	}
1596}
1597
1598/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
1599 * Useful when you want to change one particular aspect of the endpoint and then
1600 * issue a configure endpoint command.
1601 */
1602void xhci_endpoint_copy(struct xhci_hcd *xhci,
1603		struct xhci_container_ctx *in_ctx,
1604		struct xhci_container_ctx *out_ctx,
1605		unsigned int ep_index)
1606{
1607	struct xhci_ep_ctx *out_ep_ctx;
1608	struct xhci_ep_ctx *in_ep_ctx;
1609
1610	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1611	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1612
1613	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
1614	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1615	in_ep_ctx->deq = out_ep_ctx->deq;
1616	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1617	if (xhci->quirks & XHCI_MTK_HOST) {
1618		in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
1619		in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
1620	}
1621}
1622
1623/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
1624 * Useful when you want to change one particular aspect of the endpoint and then
1625 * issue a configure endpoint command.  Only the context entries field matters,
1626 * but we'll copy the whole thing anyway.
1627 */
1628void xhci_slot_copy(struct xhci_hcd *xhci,
1629		struct xhci_container_ctx *in_ctx,
1630		struct xhci_container_ctx *out_ctx)
1631{
1632	struct xhci_slot_ctx *in_slot_ctx;
1633	struct xhci_slot_ctx *out_slot_ctx;
1634
1635	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1636	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
1637
1638	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
1639	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
1640	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
1641	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
1642}
1643
1644/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
1645static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1646{
1647	int i;
1648	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1649	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1650
1651	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1652			"Allocating %d scratchpad buffers", num_sp);
1653
1654	if (!num_sp)
1655		return 0;
1656
1657	xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
1658				dev_to_node(dev));
1659	if (!xhci->scratchpad)
1660		goto fail_sp;
1661
1662	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
1663				     num_sp * sizeof(u64),
1664				     &xhci->scratchpad->sp_dma, flags);
1665	if (!xhci->scratchpad->sp_array)
1666		goto fail_sp2;
1667
1668	xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
1669					flags, dev_to_node(dev));
1670	if (!xhci->scratchpad->sp_buffers)
1671		goto fail_sp3;
1672
1673	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1674	for (i = 0; i < num_sp; i++) {
1675		dma_addr_t dma;
1676		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1677					       flags);
1678		if (!buf)
1679			goto fail_sp4;
1680
1681		xhci->scratchpad->sp_array[i] = dma;
1682		xhci->scratchpad->sp_buffers[i] = buf;
1683	}
1684
1685	return 0;
1686
1687 fail_sp4:
1688	for (i = i - 1; i >= 0; i--) {
1689		dma_free_coherent(dev, xhci->page_size,
1690				    xhci->scratchpad->sp_buffers[i],
1691				    xhci->scratchpad->sp_array[i]);
1692	}
1693
1694	kfree(xhci->scratchpad->sp_buffers);
1695
1696 fail_sp3:
1697	dma_free_coherent(dev, num_sp * sizeof(u64),
1698			    xhci->scratchpad->sp_array,
1699			    xhci->scratchpad->sp_dma);
1700
1701 fail_sp2:
1702	kfree(xhci->scratchpad);
1703	xhci->scratchpad = NULL;
1704
1705 fail_sp:
1706	return -ENOMEM;
1707}
1708
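/*
 * Free the scratchpad buffers, the scratchpad pointer array, and the
 * containing structure, undoing scratchpad_alloc() in reverse order.
 */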
1709static void scratchpad_free(struct xhci_hcd *xhci)
1710{
1711	int num_sp;
1712	int i;
1713	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1714
1715	if (!xhci->scratchpad)
1716		return;
1717
1718	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1719
1720	for (i = 0; i < num_sp; i++) {
1721		dma_free_coherent(dev, xhci->page_size,
1722				    xhci->scratchpad->sp_buffers[i],
1723				    xhci->scratchpad->sp_array[i]);
1724	}
1725	kfree(xhci->scratchpad->sp_buffers);
1726	dma_free_coherent(dev, num_sp * sizeof(u64),
1727			    xhci->scratchpad->sp_array,
1728			    xhci->scratchpad->sp_dma);
1729	kfree(xhci->scratchpad);
1730	xhci->scratchpad = NULL;
1731}
1732
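/*
 * Allocate a command structure, optionally with a completion for callers
 * that need to wait for the command to finish.  The command is not queued
 * on the command ring here.
 */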
1733struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1734		bool allocate_completion, gfp_t mem_flags)
1735{
1736	struct xhci_command *command;
1737	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1738
1739	command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
1740	if (!command)
1741		return NULL;
1742
1743	if (allocate_completion) {
1744		command->completion =
1745			kzalloc_node(sizeof(struct completion), mem_flags,
1746				dev_to_node(dev));
1747		if (!command->completion) {
1748			kfree(command);
1749			return NULL;
1750		}
1751		init_completion(command->completion);
1752	}
1753
1754	command->status = 0;
1755	INIT_LIST_HEAD(&command->cmd_list);
1756	return command;
1757}
1758
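/* As xhci_alloc_command(), but also allocate an input container context. */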
1759struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
1760		bool allocate_completion, gfp_t mem_flags)
1761{
1762	struct xhci_command *command;
1763
1764	command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
1765	if (!command)
1766		return NULL;
1767
1768	command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
1769						   mem_flags);
1770	if (!command->in_ctx) {
1771		kfree(command->completion);
1772		kfree(command);
1773		return NULL;
1774	}
1775	return command;
1776}
1777
1778void xhci_urb_free_priv(struct urb_priv *urb_priv)
1779{
1780	kfree(urb_priv);
1781}
1782
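/* Free a command along with its input context and completion, if any. */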
1783void xhci_free_command(struct xhci_hcd *xhci,
1784		struct xhci_command *command)
1785{
1786	xhci_free_container_ctx(xhci,
1787			command->in_ctx);
1788	kfree(command->completion);
1789	kfree(command);
1790}
1791
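/*
 * Allocate an Event Ring Segment Table (ERST) for the given event ring and
 * point one entry at each of the ring's segments.
 */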
1792int xhci_alloc_erst(struct xhci_hcd *xhci,
1793		    struct xhci_ring *evt_ring,
1794		    struct xhci_erst *erst,
1795		    gfp_t flags)
1796{
1797	size_t size;
1798	unsigned int val;
1799	struct xhci_segment *seg;
1800	struct xhci_erst_entry *entry;
1801
1802	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
1803	erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
1804					   size, &erst->erst_dma_addr, flags);
1805	if (!erst->entries)
1806		return -ENOMEM;
1807
1808	erst->num_entries = evt_ring->num_segs;
1809
1810	seg = evt_ring->first_seg;
1811	for (val = 0; val < evt_ring->num_segs; val++) {
1812		entry = &erst->entries[val];
1813		entry->seg_addr = cpu_to_le64(seg->dma);
1814		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
1815		entry->rsvd = 0;
1816		seg = seg->next;
1817	}
1818
1819	return 0;
1820}
1821
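/* Free the ERST entry array allocated by xhci_alloc_erst(). */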
1822void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
1823{
1824	size_t size;
1825	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1826
1827	size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
1828	if (erst->entries)
1829		dma_free_coherent(dev, size,
1830				erst->entries,
1831				erst->erst_dma_addr);
1832	erst->entries = NULL;
1833}
1834
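/*
 * Free everything allocated by xhci_mem_init(), roughly in reverse order:
 * the event and command rings, per-port bandwidth lists, virt devices,
 * DMA pools, the DCBAA, scratchpads, TT info, and the port arrays.
 */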
1835void xhci_mem_cleanup(struct xhci_hcd *xhci)
1836{
1837	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
1838	int i, j, num_ports;
1839
1840	cancel_delayed_work_sync(&xhci->cmd_timer);
1841
1842	xhci_free_erst(xhci, &xhci->erst);
1843
1844	if (xhci->event_ring)
1845		xhci_ring_free(xhci, xhci->event_ring);
1846	xhci->event_ring = NULL;
1847	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
1848
1849	if (xhci->cmd_ring)
1850		xhci_ring_free(xhci, xhci->cmd_ring);
1851	xhci->cmd_ring = NULL;
1852	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
1853	xhci_cleanup_command_queue(xhci);
1854
1855	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1856	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
1857		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1858		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1859			struct list_head *ep = &bwt->interval_bw[j].endpoints;
1860			while (!list_empty(ep))
1861				list_del_init(ep->next);
1862		}
1863	}
1864
1865	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
1866		xhci_free_virt_devices_depth_first(xhci, i);
1867
1868	dma_pool_destroy(xhci->segment_pool);
1869	xhci->segment_pool = NULL;
1870	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
1871
1872	dma_pool_destroy(xhci->device_pool);
1873	xhci->device_pool = NULL;
1874	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
1875
1876	dma_pool_destroy(xhci->small_streams_pool);
1877	xhci->small_streams_pool = NULL;
1878	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1879			"Freed small stream array pool");
1880
1881	dma_pool_destroy(xhci->medium_streams_pool);
1882	xhci->medium_streams_pool = NULL;
1883	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1884			"Freed medium stream array pool");
1885
1886	if (xhci->dcbaa)
1887		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
1888				xhci->dcbaa, xhci->dcbaa->dma);
1889	xhci->dcbaa = NULL;
1890
1891	scratchpad_free(xhci);
1892
1893	if (!xhci->rh_bw)
1894		goto no_bw;
1895
1896	for (i = 0; i < num_ports; i++) {
1897		struct xhci_tt_bw_info *tt, *n;
1898		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1899			list_del(&tt->tt_list);
1900			kfree(tt);
1901		}
1902	}
1903
1904no_bw:
1905	xhci->cmd_ring_reserved_trbs = 0;
1906	xhci->usb2_rhub.num_ports = 0;
1907	xhci->usb3_rhub.num_ports = 0;
1908	xhci->num_active_eps = 0;
1909	kfree(xhci->usb2_rhub.ports);
1910	kfree(xhci->usb3_rhub.ports);
1911	kfree(xhci->hw_ports);
1912	kfree(xhci->rh_bw);
1913	kfree(xhci->ext_caps);
1914	for (i = 0; i < xhci->num_port_caps; i++)
1915		kfree(xhci->port_caps[i].psi);
1916	kfree(xhci->port_caps);
1917	xhci->num_port_caps = 0;
1918
1919	xhci->usb2_rhub.ports = NULL;
1920	xhci->usb3_rhub.ports = NULL;
1921	xhci->hw_ports = NULL;
1922	xhci->rh_bw = NULL;
1923	xhci->ext_caps = NULL;
1924	xhci->port_caps = NULL;
1925
1926	xhci->page_size = 0;
1927	xhci->page_shift = 0;
1928	xhci->usb2_rhub.bus_state.bus_suspended = 0;
1929	xhci->usb3_rhub.bus_state.bus_suspended = 0;
1930}
1931
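/*
 * Run a single TRB math test case: check that trb_in_td() maps input_dma,
 * within the TD bounded by start_trb and end_trb, to result_seg, and dump
 * the parameters of any failing lookup.
 */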
1932static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
1933		struct xhci_segment *input_seg,
1934		union xhci_trb *start_trb,
1935		union xhci_trb *end_trb,
1936		dma_addr_t input_dma,
1937		struct xhci_segment *result_seg,
1938		char *test_name, int test_number)
1939{
1940	unsigned long long start_dma;
1941	unsigned long long end_dma;
1942	struct xhci_segment *seg;
1943
1944	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
1945	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
1946
1947	seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
1948	if (seg != result_seg) {
1949		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
1950				test_name, test_number);
1951		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
1952				"input DMA 0x%llx\n",
1953				input_seg,
1954				(unsigned long long) input_dma);
1955		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
1956				"ending TRB %p (0x%llx DMA)\n",
1957				start_trb, start_dma,
1958				end_trb, end_dma);
1959		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
1960				result_seg, seg);
1961		trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
1962			  true);
1963		return -1;
1964	}
1965	return 0;
1966}
1967
1968/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
1969static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
1970{
1971	struct {
1972		dma_addr_t		input_dma;
1973		struct xhci_segment	*result_seg;
1974	} simple_test_vector[] = {
1975		/* A zeroed DMA field should fail */
1976		{ 0, NULL },
1977		/* One TRB before the ring start should fail */
1978		{ xhci->event_ring->first_seg->dma - 16, NULL },
1979		/* One byte before the ring start should fail */
1980		{ xhci->event_ring->first_seg->dma - 1, NULL },
1981		/* Starting TRB should succeed */
1982		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1983		/* Ending TRB should succeed */
1984		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1985			xhci->event_ring->first_seg },
1986		/* One byte after the ring end should fail */
1987		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1988		/* One TRB after the ring end should fail */
1989		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1990		/* An address of all ones should fail */
1991		{ (dma_addr_t) (~0), NULL },
1992	};
1993	struct {
1994		struct xhci_segment	*input_seg;
1995		union xhci_trb		*start_trb;
1996		union xhci_trb		*end_trb;
1997		dma_addr_t		input_dma;
1998		struct xhci_segment	*result_seg;
1999	} complex_test_vector[] = {
2000		/* Test feeding a valid DMA address from a different ring */
2001		{	.input_seg = xhci->event_ring->first_seg,
2002			.start_trb = xhci->event_ring->first_seg->trbs,
2003			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2004			.input_dma = xhci->cmd_ring->first_seg->dma,
2005			.result_seg = NULL,
2006		},
2007		/* Test feeding a valid end TRB from a different ring */
2008		{	.input_seg = xhci->event_ring->first_seg,
2009			.start_trb = xhci->event_ring->first_seg->trbs,
2010			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2011			.input_dma = xhci->cmd_ring->first_seg->dma,
2012			.result_seg = NULL,
2013		},
2014		/* Test feeding a valid start and end TRB from a different ring */
2015		{	.input_seg = xhci->event_ring->first_seg,
2016			.start_trb = xhci->cmd_ring->first_seg->trbs,
2017			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2018			.input_dma = xhci->cmd_ring->first_seg->dma,
2019			.result_seg = NULL,
2020		},
2021		/* TRB in this ring, but after this TD */
2022		{	.input_seg = xhci->event_ring->first_seg,
2023			.start_trb = &xhci->event_ring->first_seg->trbs[0],
2024			.end_trb = &xhci->event_ring->first_seg->trbs[3],
2025			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
2026			.result_seg = NULL,
2027		},
2028		/* TRB in this ring, but before this TD */
2029		{	.input_seg = xhci->event_ring->first_seg,
2030			.start_trb = &xhci->event_ring->first_seg->trbs[3],
2031			.end_trb = &xhci->event_ring->first_seg->trbs[6],
2032			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
2033			.result_seg = NULL,
2034		},
2035		/* TRB in this ring, but after this wrapped TD */
2036		{	.input_seg = xhci->event_ring->first_seg,
2037			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2038			.end_trb = &xhci->event_ring->first_seg->trbs[1],
2039			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
2040			.result_seg = NULL,
2041		},
2042		/* TRB in this ring, but before this wrapped TD */
2043		{	.input_seg = xhci->event_ring->first_seg,
2044			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2045			.end_trb = &xhci->event_ring->first_seg->trbs[1],
2046			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
2047			.result_seg = NULL,
2048		},
2049		/* TRB not in this ring, and we have a wrapped TD */
2050		{	.input_seg = xhci->event_ring->first_seg,
2051			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2052			.end_trb = &xhci->event_ring->first_seg->trbs[1],
2053			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
2054			.result_seg = NULL,
2055		},
2056	};
2057
2058	unsigned int num_tests;
2059	int i, ret;
2060
2061	num_tests = ARRAY_SIZE(simple_test_vector);
2062	for (i = 0; i < num_tests; i++) {
2063		ret = xhci_test_trb_in_td(xhci,
2064				xhci->event_ring->first_seg,
2065				xhci->event_ring->first_seg->trbs,
2066				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2067				simple_test_vector[i].input_dma,
2068				simple_test_vector[i].result_seg,
2069				"Simple", i);
2070		if (ret < 0)
2071			return ret;
2072	}
2073
2074	num_tests = ARRAY_SIZE(complex_test_vector);
2075	for (i = 0; i < num_tests; i++) {
2076		ret = xhci_test_trb_in_td(xhci,
2077				complex_test_vector[i].input_seg,
2078				complex_test_vector[i].start_trb,
2079				complex_test_vector[i].end_trb,
2080				complex_test_vector[i].input_dma,
2081				complex_test_vector[i].result_seg,
2082				"Complex", i);
2083		if (ret < 0)
2084			return ret;
2085	}
2086	xhci_dbg(xhci, "TRB math tests passed.\n");
2087	return 0;
2088}
2089
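/*
 * Write the software event ring dequeue pointer to the interrupter's ERST
 * dequeue register, writing 0 to the RW1C EHB bit so that the Event Handler
 * Busy flag is left untouched.
 */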
2090static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
2091{
2092	u64 temp;
2093	dma_addr_t deq;
2094
2095	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2096			xhci->event_ring->dequeue);
2097	if (!deq)
2098		xhci_warn(xhci, "WARN something wrong with SW event ring "
2099				"dequeue ptr.\n");
2100	/* Update HC event ring dequeue pointer */
2101	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2102	temp &= ERST_PTR_MASK;
2103	/* Don't clear the EHB bit (which is RW1C) because
2104	 * there might be more events to service.
2105	 */
2106	temp &= ~ERST_EHB;
2107	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2108			"// Write event ring dequeue pointer, "
2109			"preserving EHB bit");
2110	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
2111			&xhci->ir_set->erst_dequeue);
2112}
2113
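/*
 * Parse one Supported Protocol Capability: decide which roothub (USB 2 or
 * USB 3) its ports belong to, cache any Protocol Speed ID (PSI) dwords,
 * and mark the hardware ports that the capability covers.
 */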
2114static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2115		__le32 __iomem *addr, int max_caps)
2116{
2117	u32 temp, port_offset, port_count;
2118	int i;
2119	u8 major_revision, minor_revision;
2120	struct xhci_hub *rhub;
2121	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2122	struct xhci_port_cap *port_cap;
2123
2124	temp = readl(addr);
2125	major_revision = XHCI_EXT_PORT_MAJOR(temp);
2126	minor_revision = XHCI_EXT_PORT_MINOR(temp);
2127
2128	if (major_revision == 0x03) {
2129		rhub = &xhci->usb3_rhub;
2130		/*
2131		 * Some hosts incorrectly use sub-minor version for minor
2132		 * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
2133		 * for bcdUSB 0x310). Since there is no USB release with bcdUSB
2134		 * 0x301 to 0x309, we can assume that those values are incorrect
2135		 * and fix them here.
2136		 */
2137		if (minor_revision > 0x00 && minor_revision < 0x10)
2138			minor_revision <<= 4;
2139	} else if (major_revision <= 0x02) {
2140		rhub = &xhci->usb2_rhub;
2141	} else {
2142		xhci_warn(xhci, "Ignoring unknown port speed, "
2143				"Ext Cap %p, revision = 0x%x\n",
2144				addr, major_revision);
2145		/* Ignoring port protocol we can't understand. FIXME */
2146		return;
2147	}
2148	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
2149
2150	if (rhub->min_rev < minor_revision)
2151		rhub->min_rev = minor_revision;
2152
2153	/* Port offset and count in the third dword, see section 7.2 */
2154	temp = readl(addr + 2);
2155	port_offset = XHCI_EXT_PORT_OFF(temp);
2156	port_count = XHCI_EXT_PORT_COUNT(temp);
2157	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2158			"Ext Cap %p, port offset = %u, "
2159			"count = %u, revision = 0x%x",
2160			addr, port_offset, port_count, major_revision);
2161	/* Port count includes the current port offset */
2162	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
2163		/* WTF? "Valid values are ‘1’ to MaxPorts" */
2164		return;
2165
2166	port_cap = &xhci->port_caps[xhci->num_port_caps++];
2167	if (xhci->num_port_caps > max_caps)
2168		return;
2169
2170	port_cap->maj_rev = major_revision;
2171	port_cap->min_rev = minor_revision;
2172	port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
2173
2174	if (port_cap->psi_count) {
2175		port_cap->psi = kcalloc_node(port_cap->psi_count,
2176					     sizeof(*port_cap->psi),
2177					     GFP_KERNEL, dev_to_node(dev));
2178		if (!port_cap->psi)
2179			port_cap->psi_count = 0;
2180
2181		port_cap->psi_uid_count++;
2182		for (i = 0; i < port_cap->psi_count; i++) {
2183			port_cap->psi[i] = readl(addr + 4 + i);
2184
2185			/* Count unique ID values; two consecutive entries can
2186			 * have the same ID if the link is asymmetric.
2187			 */
2188			if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
2189				  XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
2190				port_cap->psi_uid_count++;
2191
2192			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
2193				  XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
2194				  XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
2195				  XHCI_EXT_PORT_PLT(port_cap->psi[i]),
2196				  XHCI_EXT_PORT_PFD(port_cap->psi[i]),
2197				  XHCI_EXT_PORT_LP(port_cap->psi[i]),
2198				  XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
2199		}
2200	}
2201	/* cache usb2 port capabilities */
2202	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
2203		xhci->ext_caps[xhci->num_ext_caps++] = temp;
2204
2205	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
2206		 (temp & XHCI_HLC)) {
2207		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2208			       "xHCI 1.0: support USB2 hardware lpm");
2209		xhci->hw_lpm_support = 1;
2210	}
2211
2212	port_offset--;
2213	for (i = port_offset; i < (port_offset + port_count); i++) {
2214		struct xhci_port *hw_port = &xhci->hw_ports[i];
2215		/* Duplicate entry.  Ignore the port if the revisions differ. */
2216		if (hw_port->rhub) {
2217			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
2218					" port %u\n", addr, i);
2219			xhci_warn(xhci, "Port was marked as USB %u, "
2220					"duplicated as USB %u\n",
2221					hw_port->rhub->maj_rev, major_revision);
2222			/* Only adjust the roothub port counts if we haven't
2223			 * found a similar duplicate.
2224			 */
2225			if (hw_port->rhub != rhub &&
2226				 hw_port->hcd_portnum != DUPLICATE_ENTRY) {
2227				hw_port->rhub->num_ports--;
2228				hw_port->hcd_portnum = DUPLICATE_ENTRY;
2229			}
2230			continue;
2231		}
2232		hw_port->rhub = rhub;
2233		hw_port->port_cap = port_cap;
2234		rhub->num_ports++;
2235	}
2236	/* FIXME: Should we disable ports not in the Extended Capabilities? */
2237}
2238
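/*
 * Build a roothub's ports[] array from the hardware ports that were
 * assigned to it while the protocol capabilities were parsed.
 */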
2239static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
2240					struct xhci_hub *rhub, gfp_t flags)
2241{
2242	int port_index = 0;
2243	int i;
2244	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2245
2246	if (!rhub->num_ports)
2247		return;
2248	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
2249			flags, dev_to_node(dev));
2250	if (!rhub->ports)
2251		return;
2252
2253	for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
2254		if (xhci->hw_ports[i].rhub != rhub ||
2255		    xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
2256			continue;
2257		xhci->hw_ports[i].hcd_portnum = port_index;
2258		rhub->ports[port_index] = &xhci->hw_ports[i];
2259		port_index++;
2260		if (port_index == rhub->num_ports)
2261			break;
2262	}
2263}
2264
2265/*
2266 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
2267 * specify what speeds each port supports.  We can't count on the port
2268 * speed bits in the PORTSC register being correct until a device is connected,
2269 * but we need to set up the two fake roothubs with the correct number of USB
2270 * 3.0 and USB 2.0 ports at host controller initialization time.
2271 */
2272static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2273{
2274	void __iomem *base;
2275	u32 offset;
2276	unsigned int num_ports;
2277	int i, j;
2278	int cap_count = 0;
2279	u32 cap_start;
2280	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2281
2282	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2283	xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
2284				flags, dev_to_node(dev));
2285	if (!xhci->hw_ports)
2286		return -ENOMEM;
2287
2288	for (i = 0; i < num_ports; i++) {
2289		xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
2290			NUM_PORT_REGS * i;
2291		xhci->hw_ports[i].hw_portnum = i;
2292	}
2293
2294	xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
2295				   dev_to_node(dev));
2296	if (!xhci->rh_bw)
2297		return -ENOMEM;
2298	for (i = 0; i < num_ports; i++) {
2299		struct xhci_interval_bw_table *bw_table;
2300
2301		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
2302		bw_table = &xhci->rh_bw[i].bw_table;
2303		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2304			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2305	}
2306	base = &xhci->cap_regs->hc_capbase;
2307
2308	cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
2309	if (!cap_start) {
2310		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
2311		return -ENODEV;
2312	}
2313
2314	offset = cap_start;
2315	/* count extended protocol capability entries for later caching */
2316	while (offset) {
2317		cap_count++;
2318		offset = xhci_find_next_ext_cap(base, offset,
2319						      XHCI_EXT_CAPS_PROTOCOL);
2320	}
2321
2322	xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
2323				flags, dev_to_node(dev));
2324	if (!xhci->ext_caps)
2325		return -ENOMEM;
2326
2327	xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
2328				flags, dev_to_node(dev));
2329	if (!xhci->port_caps)
2330		return -ENOMEM;
2331
2332	offset = cap_start;
2333
2334	while (offset) {
2335		xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
2336		if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
2337		    num_ports)
2338			break;
2339		offset = xhci_find_next_ext_cap(base, offset,
2340						XHCI_EXT_CAPS_PROTOCOL);
2341	}
2342	if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
2343		xhci_warn(xhci, "No ports on the roothubs?\n");
2344		return -ENODEV;
2345	}
2346	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2347		       "Found %u USB 2.0 ports and %u USB 3.0 ports.",
2348		       xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);
2349
2350	/* Place limits on the number of roothub ports so that the hub
2351	 * descriptors aren't longer than the USB core will allocate.
2352	 */
2353	if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
2354		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2355				"Limiting USB 3.0 roothub ports to %u.",
2356				USB_SS_MAXPORTS);
2357		xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
2358	}
2359	if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
2360		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2361				"Limiting USB 2.0 roothub ports to %u.",
2362				USB_MAXCHILDREN);
2363		xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
2364	}
2365
2366	if (!xhci->usb2_rhub.num_ports)
2367		xhci_info(xhci, "USB2 root hub has no ports\n");
2368
2369	if (!xhci->usb3_rhub.num_ports)
2370		xhci_info(xhci, "USB3 root hub has no ports\n");
2371
2372	xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
2373	xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
2374
2375	return 0;
2376}
2377
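/*
 * Allocate and initialize the xHC's data structures: the device context
 * base address array, DMA pools, command and event rings, ERST, scratchpad
 * buffers, and roothub port arrays.  On any failure the controller is
 * halted and reset, and xhci_mem_cleanup() tears everything down again.
 */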
2378int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2379{
2380	dma_addr_t	dma;
2381	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
2382	unsigned int	val, val2;
2383	u64		val_64;
2384	u32		page_size, temp;
2385	int		i, ret;
2386
2387	INIT_LIST_HEAD(&xhci->cmd_list);
2388
2389	/* init command timeout work */
2390	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
2391	init_completion(&xhci->cmd_ring_stop_completion);
2392
2393	page_size = readl(&xhci->op_regs->page_size);
2394	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2395			"Supported page size register = 0x%x", page_size);
2396	i = ffs(page_size);
2397	if (i < 16)
2398		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2399			"Supported page size of %iK", (1 << (i+12)) / 1024);
2400	else
2401		xhci_warn(xhci, "WARN: no supported page size\n");
2402	/* Use 4K pages, since that's common and the minimum the HC supports */
2403	xhci->page_shift = 12;
2404	xhci->page_size = 1 << xhci->page_shift;
2405	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2406			"HCD page size set to %iK", xhci->page_size / 1024);
2407
2408	/*
2409	 * Program the Number of Device Slots Enabled field in the CONFIG
2410	 * register with the max value of slots the HC can handle.
2411	 */
2412	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
2413	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2414			"// xHC can handle at most %d device slots.", val);
2415	val2 = readl(&xhci->op_regs->config_reg);
2416	val |= (val2 & ~HCS_SLOTS_MASK);
2417	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2418			"// Setting Max device slots reg = 0x%x.", val);
2419	writel(val, &xhci->op_regs->config_reg);
2420
2421	/*
2422	 * xHCI section 5.4.6 - Device Context array must be
2423	 * "physically contiguous and 64-byte (cache line) aligned".
2424	 */
2425	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
2426			flags);
2427	if (!xhci->dcbaa)
2428		goto fail;
2429	xhci->dcbaa->dma = dma;
2430	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2431			"// Device context base array address = 0x%llx (DMA), %p (virt)",
2432			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
2433	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
2434
2435	/*
2436	 * Initialize the ring segment pool.  The ring must be a contiguous
2437	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
2438	 * however, the command ring segment needs 64-byte aligned segments
2439	 * and our use of dma addresses in the trb_address_map radix tree needs
2440	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
2441	 */
2442	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
2443			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
2444
2445	/* See Table 46 and Note on Figure 55 */
2446	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2447			2112, 64, xhci->page_size);
2448	if (!xhci->segment_pool || !xhci->device_pool)
2449		goto fail;
2450
2451	/* Linear stream context arrays don't have any boundary restrictions,
2452	 * and only need to be 16-byte aligned.
2453	 */
2454	xhci->small_streams_pool =
2455		dma_pool_create("xHCI 256 byte stream ctx arrays",
2456			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
2457	xhci->medium_streams_pool =
2458		dma_pool_create("xHCI 1KB stream ctx arrays",
2459			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
2460	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
2461	 * will be allocated with dma_alloc_coherent()
2462	 */
2463
2464	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
2465		goto fail;
2466
2467	/* Set up the command ring to have one segment for now. */
2468	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
2469	if (!xhci->cmd_ring)
2470		goto fail;
2471	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2472			"Allocated command ring at %p", xhci->cmd_ring);
2473	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
2474			(unsigned long long)xhci->cmd_ring->first_seg->dma);
2475
2476	/* Set the address in the Command Ring Control register */
2477	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2478	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2479		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2480		xhci->cmd_ring->cycle_state;
2481	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2482			"// Setting command ring address to 0x%016llx", val_64);
2483	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
2484
2485	/* Reserve one command ring TRB for disabling LPM.
2486	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
2487	 * disabling LPM, we only need to reserve one TRB for all devices.
2488	 */
2489	xhci->cmd_ring_reserved_trbs++;
2490
2491	val = readl(&xhci->cap_regs->db_off);
2492	val &= DBOFF_MASK;
2493	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2494			"// Doorbell array is located at offset 0x%x"
2495			" from cap regs base addr", val);
2496	xhci->dba = (void __iomem *) xhci->cap_regs + val;
2497	/* Set ir_set to interrupt register set 0 */
2498	xhci->ir_set = &xhci->run_regs->ir_set[0];
2499
2500	/*
2501	 * Event ring setup: Allocate a normal ring, but also set up
2502	 * the event ring segment table (ERST).  Section 4.9.3.
2503	 */
2504	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
2505	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
2506					0, flags);
2507	if (!xhci->event_ring)
2508		goto fail;
2509	if (xhci_check_trb_in_td_math(xhci) < 0)
2510		goto fail;
2511
2512	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
2513	if (ret)
2514		goto fail;
2515
2516	/* set ERST count with the number of entries in the segment table */
2517	val = readl(&xhci->ir_set->erst_size);
2518	val &= ERST_SIZE_MASK;
2519	val |= ERST_NUM_SEGS;
2520	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2521			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
2522			val);
2523	writel(val, &xhci->ir_set->erst_size);
2524
2525	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2526			"// Set ERST entries to point to event ring.");
2527	/* set the segment table base address */
2528	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2529			"// Set ERST base address for ir_set 0 = 0x%llx",
2530			(unsigned long long)xhci->erst.erst_dma_addr);
2531	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2532	val_64 &= ERST_PTR_MASK;
2533	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
2534	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
2535
2536	/* Set the event ring dequeue address */
2537	xhci_set_hc_event_deq(xhci);
2538	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2539			"Wrote ERST address to ir_set 0.");
2540
2541	xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
2542
2543	/*
2544	 * XXX: Might need to set the Interrupter Moderation Register to
2545	 * something other than the default (~1ms minimum between interrupts).
2546	 * See section 5.5.1.2.
2547	 */
2548	for (i = 0; i < MAX_HC_SLOTS; i++)
2549		xhci->devs[i] = NULL;
2550	for (i = 0; i < USB_MAXCHILDREN; i++) {
2551		xhci->usb2_rhub.bus_state.resume_done[i] = 0;
2552		xhci->usb3_rhub.bus_state.resume_done[i] = 0;
2553		/* Only the USB 2.0 rexit completions will ever be used. */
2554		init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
2555		init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
2556	}
2557
2558	if (scratchpad_alloc(xhci, flags))
2559		goto fail;
2560	if (xhci_setup_port_arrays(xhci, flags))
2561		goto fail;
2562
2563	/* Enable USB 3.0 device notifications for function remote wake, which
2564	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
2565	 * U3 (device suspend).
2566	 */
2567	temp = readl(&xhci->op_regs->dev_notification);
2568	temp &= ~DEV_NOTE_MASK;
2569	temp |= DEV_NOTE_FWAKE;
2570	writel(temp, &xhci->op_regs->dev_notification);
2571
2572	return 0;
2573
2574fail:
2575	xhci_halt(xhci);
2576	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
2577	xhci_mem_cleanup(xhci);
2578	return -ENOMEM;
2579}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * xHCI host controller driver
   4 *
   5 * Copyright (C) 2008 Intel Corp.
   6 *
   7 * Author: Sarah Sharp
   8 * Some code borrowed from the Linux EHCI driver.
   9 */
  10
  11#include <linux/usb.h>
  12#include <linux/pci.h>
  13#include <linux/slab.h>
  14#include <linux/dmapool.h>
  15#include <linux/dma-mapping.h>
  16
  17#include "xhci.h"
  18#include "xhci-trace.h"
  19#include "xhci-debugfs.h"
  20
  21/*
  22 * Allocates a generic ring segment from the ring pool, sets the dma address,
  23 * initializes the segment to zero, and sets the private next pointer to NULL.
  24 *
  25 * Section 4.11.1.1:
  26 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
  27 */
  28static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
  29					       unsigned int cycle_state,
  30					       unsigned int max_packet,
  31					       gfp_t flags)
  32{
  33	struct xhci_segment *seg;
  34	dma_addr_t	dma;
  35	int		i;
  36	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
  37
  38	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
  39	if (!seg)
  40		return NULL;
  41
  42	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
  43	if (!seg->trbs) {
  44		kfree(seg);
  45		return NULL;
  46	}
  47
  48	if (max_packet) {
  49		seg->bounce_buf = kzalloc_node(max_packet, flags,
  50					dev_to_node(dev));
  51		if (!seg->bounce_buf) {
  52			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
  53			kfree(seg);
  54			return NULL;
  55		}
  56	}
  57	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
  58	if (cycle_state == 0) {
  59		for (i = 0; i < TRBS_PER_SEGMENT; i++)
  60			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
  61	}
  62	seg->dma = dma;
  63	seg->next = NULL;
  64
  65	return seg;
  66}
  67
  68static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
  69{
  70	if (seg->trbs) {
  71		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
  72		seg->trbs = NULL;
  73	}
  74	kfree(seg->bounce_buf);
  75	kfree(seg);
  76}
  77
  78static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
  79				struct xhci_segment *first)
  80{
  81	struct xhci_segment *seg;
  82
  83	seg = first->next;
  84	while (seg != first) {
  85		struct xhci_segment *next = seg->next;
  86		xhci_segment_free(xhci, seg);
  87		seg = next;
  88	}
  89	xhci_segment_free(xhci, first);
  90}
  91
  92/*
  93 * Make the prev segment point to the next segment.
  94 *
  95 * Change the last TRB in the prev segment to be a Link TRB which points to the
  96 * DMA address of the next segment.  The caller needs to set any Link TRB
  97 * related flags, such as End TRB, Toggle Cycle, and no snoop.
  98 */
  99static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 100		struct xhci_segment *next, enum xhci_ring_type type)
 
 101{
 102	u32 val;
 103
 104	if (!prev || !next)
 105		return;
 106	prev->next = next;
 107	if (type != TYPE_EVENT) {
 108		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
 109			cpu_to_le64(next->dma);
 110
 111		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
 112		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
 113		val &= ~TRB_TYPE_BITMASK;
 114		val |= TRB_TYPE(TRB_LINK);
 115		/* Always set the chain bit with 0.95 hardware */
 116		/* Set chain bit for isoc rings on AMD 0.96 host */
 117		if (xhci_link_trb_quirk(xhci) ||
 118				(type == TYPE_ISOC &&
 119				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
 120			val |= TRB_CHAIN;
 121		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 122	}
 123}
 124
 125/*
 126 * Link the ring to the new segments.
 127 * Set Toggle Cycle for the new ring if needed.
 128 */
 129static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
 130		struct xhci_segment *first, struct xhci_segment *last,
 131		unsigned int num_segs)
 132{
 133	struct xhci_segment *next;
 
 134
 135	if (!ring || !first || !last)
 136		return;
 137
 
 
 
 
 
 138	next = ring->enq_seg->next;
 139	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
 140	xhci_link_segments(xhci, last, next, ring->type);
 141	ring->num_segs += num_segs;
 142	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
 143
 144	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
 145		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
 146			&= ~cpu_to_le32(LINK_TOGGLE);
 147		last->trbs[TRBS_PER_SEGMENT-1].link.control
 148			|= cpu_to_le32(LINK_TOGGLE);
 149		ring->last_seg = last;
 150	}
 151}
 152
 153/*
 154 * We need a radix tree for mapping physical addresses of TRBs to which stream
 155 * ID they belong to.  We need to do this because the host controller won't tell
 156 * us which stream ring the TRB came from.  We could store the stream ID in an
 157 * event data TRB, but that doesn't help us for the cancellation case, since the
 158 * endpoint may stop before it reaches that event data TRB.
 159 *
 160 * The radix tree maps the upper portion of the TRB DMA address to a ring
 161 * segment that has the same upper portion of DMA addresses.  For example, say I
 162 * have segments of size 1KB, that are always 1KB aligned.  A segment may
 163 * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
 164 * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
 165 * pass the radix tree a key to get the right stream ID:
 166 *
 167 *	0x10c90fff >> 10 = 0x43243
 168 *	0x10c912c0 >> 10 = 0x43244
 169 *	0x10c91400 >> 10 = 0x43245
 170 *
 171 * Obviously, only those TRBs with DMA addresses that are within the segment
 172 * will make the radix tree return the stream ID for that ring.
 173 *
 174 * Caveats for the radix tree:
 175 *
 176 * The radix tree uses an unsigned long as a key pair.  On 32-bit systems, an
 177 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 178 * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
 179 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 180 * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
 181 * extended systems (where the DMA address can be bigger than 32-bits),
 182 * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
 183 */
 184static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
 185		struct xhci_ring *ring,
 186		struct xhci_segment *seg,
 187		gfp_t mem_flags)
 188{
 189	unsigned long key;
 190	int ret;
 191
 192	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
 193	/* Skip any segments that were already added. */
 194	if (radix_tree_lookup(trb_address_map, key))
 195		return 0;
 196
 197	ret = radix_tree_maybe_preload(mem_flags);
 198	if (ret)
 199		return ret;
 200	ret = radix_tree_insert(trb_address_map,
 201			key, ring);
 202	radix_tree_preload_end();
 203	return ret;
 204}
 205
 206static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
 207		struct xhci_segment *seg)
 208{
 209	unsigned long key;
 210
 211	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
 212	if (radix_tree_lookup(trb_address_map, key))
 213		radix_tree_delete(trb_address_map, key);
 214}
 215
 216static int xhci_update_stream_segment_mapping(
 217		struct radix_tree_root *trb_address_map,
 218		struct xhci_ring *ring,
 219		struct xhci_segment *first_seg,
 220		struct xhci_segment *last_seg,
 221		gfp_t mem_flags)
 222{
 223	struct xhci_segment *seg;
 224	struct xhci_segment *failed_seg;
 225	int ret;
 226
 227	if (WARN_ON_ONCE(trb_address_map == NULL))
 228		return 0;
 229
 230	seg = first_seg;
 231	do {
 232		ret = xhci_insert_segment_mapping(trb_address_map,
 233				ring, seg, mem_flags);
 234		if (ret)
 235			goto remove_streams;
 236		if (seg == last_seg)
 237			return 0;
 238		seg = seg->next;
 239	} while (seg != first_seg);
 240
 241	return 0;
 242
 243remove_streams:
 244	failed_seg = seg;
 245	seg = first_seg;
 246	do {
 247		xhci_remove_segment_mapping(trb_address_map, seg);
 248		if (seg == failed_seg)
 249			return ret;
 250		seg = seg->next;
 251	} while (seg != first_seg);
 252
 253	return ret;
 254}
 255
 256static void xhci_remove_stream_mapping(struct xhci_ring *ring)
 257{
 258	struct xhci_segment *seg;
 259
 260	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
 261		return;
 262
 263	seg = ring->first_seg;
 264	do {
 265		xhci_remove_segment_mapping(ring->trb_address_map, seg);
 266		seg = seg->next;
 267	} while (seg != ring->first_seg);
 268}
 269
 270static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
 271{
 272	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
 273			ring->first_seg, ring->last_seg, mem_flags);
 274}
 275
 276/* XXX: Do we need the hcd structure in all these functions? */
 277void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 278{
 279	if (!ring)
 280		return;
 281
 282	trace_xhci_ring_free(ring);
 283
 284	if (ring->first_seg) {
 285		if (ring->type == TYPE_STREAM)
 286			xhci_remove_stream_mapping(ring);
 287		xhci_free_segments_for_ring(xhci, ring->first_seg);
 288	}
 289
 290	kfree(ring);
 291}
 292
 293static void xhci_initialize_ring_info(struct xhci_ring *ring,
 294					unsigned int cycle_state)
 295{
 296	/* The ring is empty, so the enqueue pointer == dequeue pointer */
 297	ring->enqueue = ring->first_seg->trbs;
 298	ring->enq_seg = ring->first_seg;
 299	ring->dequeue = ring->enqueue;
 300	ring->deq_seg = ring->first_seg;
 301	/* The ring is initialized to 0. The producer must write 1 to the cycle
 302	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
 303	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
 304	 *
 305	 * New rings are initialized with cycle state equal to 1; if we are
 306	 * handling ring expansion, set the cycle state equal to the old ring.
 307	 */
 308	ring->cycle_state = cycle_state;
 309
 310	/*
 311	 * Each segment has a link TRB, and leave an extra TRB for SW
 312	 * accounting purpose
 313	 */
 314	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
 315}
 316
 317/* Allocate segments and link them for a ring */
 318static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 319		struct xhci_segment **first, struct xhci_segment **last,
 320		unsigned int num_segs, unsigned int cycle_state,
 321		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 322{
 323	struct xhci_segment *prev;
 
 
 
 
 
 
 324
 325	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 326	if (!prev)
 327		return -ENOMEM;
 328	num_segs--;
 329
 330	*first = prev;
 331	while (num_segs > 0) {
 332		struct xhci_segment	*next;
 333
 334		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 335		if (!next) {
 336			prev = *first;
 337			while (prev) {
 338				next = prev->next;
 339				xhci_segment_free(xhci, prev);
 340				prev = next;
 341			}
 342			return -ENOMEM;
 343		}
 344		xhci_link_segments(xhci, prev, next, type);
 345
 346		prev = next;
 347		num_segs--;
 348	}
 349	xhci_link_segments(xhci, prev, *first, type);
 350	*last = prev;
 351
 352	return 0;
 353}
 354
 355/**
 356 * Create a new ring with zero or more segments.
 357 *
 358 * Link each segment together into a ring.
 359 * Set the end flag and the cycle toggle bit on the last segment.
 360 * See section 4.9.1 and figures 15 and 16.
 361 */
 362struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 363		unsigned int num_segs, unsigned int cycle_state,
 364		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 365{
 366	struct xhci_ring	*ring;
 367	int ret;
 368	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 369
 370	ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
 371	if (!ring)
 372		return NULL;
 373
 374	ring->num_segs = num_segs;
 375	ring->bounce_buf_len = max_packet;
 376	INIT_LIST_HEAD(&ring->td_list);
 377	ring->type = type;
 378	if (num_segs == 0)
 379		return ring;
 380
 381	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
 382			&ring->last_seg, num_segs, cycle_state, type,
 383			max_packet, flags);
 384	if (ret)
 385		goto fail;
 386
 387	/* Only event ring does not use link TRB */
 388	if (type != TYPE_EVENT) {
 389		/* See section 4.9.2.1 and 6.4.4.1 */
 390		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
 391			cpu_to_le32(LINK_TOGGLE);
 392	}
 393	xhci_initialize_ring_info(ring, cycle_state);
 394	trace_xhci_ring_alloc(ring);
 395	return ring;
 396
 397fail:
 398	kfree(ring);
 399	return NULL;
 400}
 401
 402void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
 403		struct xhci_virt_device *virt_dev,
 404		unsigned int ep_index)
 405{
 406	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
 407	virt_dev->eps[ep_index].ring = NULL;
 408}
 409
 410/*
 411 * Expand an existing ring.
 412 * Allocate a new ring which has same segment numbers and link the two rings.
 413 */
 414int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 415				unsigned int num_trbs, gfp_t flags)
 416{
 417	struct xhci_segment	*first;
 418	struct xhci_segment	*last;
 419	unsigned int		num_segs;
 420	unsigned int		num_segs_needed;
 421	int			ret;
 422
 423	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
 424				(TRBS_PER_SEGMENT - 1);
 425
 426	/* Allocate number of segments we needed, or double the ring size */
 427	num_segs = ring->num_segs > num_segs_needed ?
 428			ring->num_segs : num_segs_needed;
 429
 430	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
 431			num_segs, ring->cycle_state, ring->type,
 432			ring->bounce_buf_len, flags);
 433	if (ret)
 434		return -ENOMEM;
 435
 436	if (ring->type == TYPE_STREAM)
 437		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
 438						ring, first, last, flags);
 439	if (ret) {
 440		struct xhci_segment *next;
 441		do {
 442			next = first->next;
 443			xhci_segment_free(xhci, first);
 444			if (first == last)
 445				break;
 446			first = next;
 447		} while (true);
 448		return ret;
 449	}
 450
 451	xhci_link_rings(xhci, ring, first, last, num_segs);
 452	trace_xhci_ring_expansion(ring);
 453	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
 454			"ring expansion succeed, now has %d segments",
 455			ring->num_segs);
 456
 457	return 0;
 458}
 459
 460struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
 461						    int type, gfp_t flags)
 462{
 463	struct xhci_container_ctx *ctx;
 464	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 465
 466	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
 467		return NULL;
 468
 469	ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
 470	if (!ctx)
 471		return NULL;
 472
 473	ctx->type = type;
 474	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
 475	if (type == XHCI_CTX_TYPE_INPUT)
 476		ctx->size += CTX_SIZE(xhci->hcc_params);
 477
 478	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
 479	if (!ctx->bytes) {
 480		kfree(ctx);
 481		return NULL;
 482	}
 483	return ctx;
 484}
 485
 486void xhci_free_container_ctx(struct xhci_hcd *xhci,
 487			     struct xhci_container_ctx *ctx)
 488{
 489	if (!ctx)
 490		return;
 491	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
 492	kfree(ctx);
 493}
 494
 495struct xhci_input_control_ctx *xhci_get_input_control_ctx(
 496					      struct xhci_container_ctx *ctx)
 497{
 498	if (ctx->type != XHCI_CTX_TYPE_INPUT)
 499		return NULL;
 500
 501	return (struct xhci_input_control_ctx *)ctx->bytes;
 502}
 503
 504struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
 505					struct xhci_container_ctx *ctx)
 506{
 507	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
 508		return (struct xhci_slot_ctx *)ctx->bytes;
 509
 510	return (struct xhci_slot_ctx *)
 511		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
 512}
 513
 514struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
 515				    struct xhci_container_ctx *ctx,
 516				    unsigned int ep_index)
 517{
 518	/* increment ep index by offset of start of ep ctx array */
 519	ep_index++;
 520	if (ctx->type == XHCI_CTX_TYPE_INPUT)
 521		ep_index++;
 522
 523	return (struct xhci_ep_ctx *)
 524		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
 525}
 526
 527
 528/***************** Streams structures manipulation *************************/
 529
 530static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
 531		unsigned int num_stream_ctxs,
 532		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
 533{
 534	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 535	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
 536
 537	if (size > MEDIUM_STREAM_ARRAY_SIZE)
 538		dma_free_coherent(dev, size,
 539				stream_ctx, dma);
 540	else if (size <= SMALL_STREAM_ARRAY_SIZE)
 541		return dma_pool_free(xhci->small_streams_pool,
 542				stream_ctx, dma);
 543	else
 544		return dma_pool_free(xhci->medium_streams_pool,
 545				stream_ctx, dma);
 546}
 547
 548/*
 549 * The stream context array for each endpoint with bulk streams enabled can
 550 * vary in size, based on:
 551 *  - how many streams the endpoint supports,
 552 *  - the maximum primary stream array size the host controller supports,
 553 *  - and how many streams the device driver asks for.
 554 *
 555 * The stream context array must be a power of 2, and can be as small as
 556 * 64 bytes or as large as 1MB.
 557 */
 558static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
 559		unsigned int num_stream_ctxs, dma_addr_t *dma,
 560		gfp_t mem_flags)
 561{
 562	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 563	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
 564
 565	if (size > MEDIUM_STREAM_ARRAY_SIZE)
 566		return dma_alloc_coherent(dev, size,
 567				dma, mem_flags);
 568	else if (size <= SMALL_STREAM_ARRAY_SIZE)
 569		return dma_pool_alloc(xhci->small_streams_pool,
 570				mem_flags, dma);
 571	else
 572		return dma_pool_alloc(xhci->medium_streams_pool,
 573				mem_flags, dma);
 574}
 575
 576struct xhci_ring *xhci_dma_to_transfer_ring(
 577		struct xhci_virt_ep *ep,
 578		u64 address)
 579{
 580	if (ep->ep_state & EP_HAS_STREAMS)
 581		return radix_tree_lookup(&ep->stream_info->trb_address_map,
 582				address >> TRB_SEGMENT_SHIFT);
 583	return ep->ring;
 584}
 585
 586struct xhci_ring *xhci_stream_id_to_ring(
 587		struct xhci_virt_device *dev,
 588		unsigned int ep_index,
 589		unsigned int stream_id)
 590{
 591	struct xhci_virt_ep *ep = &dev->eps[ep_index];
 592
 593	if (stream_id == 0)
 594		return ep->ring;
 595	if (!ep->stream_info)
 596		return NULL;
 597
 598	if (stream_id >= ep->stream_info->num_streams)
 599		return NULL;
 600	return ep->stream_info->stream_rings[stream_id];
 601}
 602
 603/*
 604 * Change an endpoint's internal structure so it supports stream IDs.  The
 605 * number of requested streams includes stream 0, which cannot be used by device
 606 * drivers.
 607 *
 608 * The number of stream contexts in the stream context array may be bigger than
 609 * the number of streams the driver wants to use.  This is because the number of
 610 * stream context array entries must be a power of two.
 611 */
 612struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 613		unsigned int num_stream_ctxs,
 614		unsigned int num_streams,
 615		unsigned int max_packet, gfp_t mem_flags)
 616{
 617	struct xhci_stream_info *stream_info;
 618	u32 cur_stream;
 619	struct xhci_ring *cur_ring;
 620	u64 addr;
 621	int ret;
 622	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 623
 624	xhci_dbg(xhci, "Allocating %u streams and %u "
 625			"stream context array entries.\n",
 626			num_streams, num_stream_ctxs);
 627	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
 628		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
 629		return NULL;
 630	}
 631	xhci->cmd_ring_reserved_trbs++;
 632
 633	stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
 634			dev_to_node(dev));
 635	if (!stream_info)
 636		goto cleanup_trbs;
 637
 638	stream_info->num_streams = num_streams;
 639	stream_info->num_stream_ctxs = num_stream_ctxs;
 640
 641	/* Initialize the array of virtual pointers to stream rings. */
 642	stream_info->stream_rings = kcalloc_node(
 643			num_streams, sizeof(struct xhci_ring *), mem_flags,
 644			dev_to_node(dev));
 645	if (!stream_info->stream_rings)
 646		goto cleanup_info;
 647
 648	/* Initialize the array of DMA addresses for stream rings for the HW. */
 649	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
 650			num_stream_ctxs, &stream_info->ctx_array_dma,
 651			mem_flags);
 652	if (!stream_info->stream_ctx_array)
 653		goto cleanup_ctx;
 654	memset(stream_info->stream_ctx_array, 0,
 655			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
 656
 657	/* Allocate everything needed to free the stream rings later */
 658	stream_info->free_streams_command =
 659		xhci_alloc_command_with_ctx(xhci, true, mem_flags);
 660	if (!stream_info->free_streams_command)
 661		goto cleanup_ctx;
 662
 663	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
 664
 665	/* Allocate rings for all the streams that the driver will use,
 666	 * and add their segment DMA addresses to the radix tree.
 667	 * Stream 0 is reserved.
 668	 */
 669
 670	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 671		stream_info->stream_rings[cur_stream] =
 672			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
 673					mem_flags);
 674		cur_ring = stream_info->stream_rings[cur_stream];
 675		if (!cur_ring)
 676			goto cleanup_rings;
 677		cur_ring->stream_id = cur_stream;
 678		cur_ring->trb_address_map = &stream_info->trb_address_map;
 679		/* Set deq ptr, cycle bit, and stream context type */
 680		addr = cur_ring->first_seg->dma |
 681			SCT_FOR_CTX(SCT_PRI_TR) |
 682			cur_ring->cycle_state;
 683		stream_info->stream_ctx_array[cur_stream].stream_ring =
 684			cpu_to_le64(addr);
 685		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
 686				cur_stream, (unsigned long long) addr);
 687
 688		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
 689		if (ret) {
 690			xhci_ring_free(xhci, cur_ring);
 691			stream_info->stream_rings[cur_stream] = NULL;
 692			goto cleanup_rings;
 693		}
 694	}
 695	/* Leave the other unused stream ring pointers in the stream context
 696	 * array initialized to zero.  This will cause the xHC to give us an
 697	 * error if the device asks for a stream ID we don't have setup (if it
 698	 * was any other way, the host controller would assume the ring is
 699	 * "empty" and wait forever for data to be queued to that stream ID).
 700	 */
 701
 702	return stream_info;
 703
 704cleanup_rings:
 705	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 706		cur_ring = stream_info->stream_rings[cur_stream];
 707		if (cur_ring) {
 708			xhci_ring_free(xhci, cur_ring);
 709			stream_info->stream_rings[cur_stream] = NULL;
 710		}
 711	}
 712	xhci_free_command(xhci, stream_info->free_streams_command);
 713cleanup_ctx:
 
 
 
 
 
 714	kfree(stream_info->stream_rings);
 715cleanup_info:
 716	kfree(stream_info);
 717cleanup_trbs:
 718	xhci->cmd_ring_reserved_trbs--;
 719	return NULL;
 720}
 721/*
 722 * Sets the MaxPStreams field and the Linear Stream Array field.
 723 * Sets the dequeue pointer to the stream context array.
 724 */
 725void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
 726		struct xhci_ep_ctx *ep_ctx,
 727		struct xhci_stream_info *stream_info)
 728{
 729	u32 max_primary_streams;
 730	/* MaxPStreams is the number of stream context array entries, not the
 731	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
 732	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
 733	 */
 734	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
 735	xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
 736			"Setting number of stream ctx array entries to %u",
 737			1 << (max_primary_streams + 1));
 738	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
 739	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
 740				       | EP_HAS_LSA);
 741	ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
 742}
 743
 744/*
 745 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 746 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 747 * not at the beginning of the ring).
 748 */
 749void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
 750		struct xhci_virt_ep *ep)
 751{
 752	dma_addr_t addr;
 753	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
 754	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
 755	ep_ctx->deq  = cpu_to_le64(addr | ep->ring->cycle_state);
 756}
 757
 758/* Frees all stream contexts associated with the endpoint,
 759 *
 760 * Caller should fix the endpoint context streams fields.
 761 */
 762void xhci_free_stream_info(struct xhci_hcd *xhci,
 763		struct xhci_stream_info *stream_info)
 764{
 765	int cur_stream;
 766	struct xhci_ring *cur_ring;
 767
 768	if (!stream_info)
 769		return;
 770
 771	for (cur_stream = 1; cur_stream < stream_info->num_streams;
 772			cur_stream++) {
 773		cur_ring = stream_info->stream_rings[cur_stream];
 774		if (cur_ring) {
 775			xhci_ring_free(xhci, cur_ring);
 776			stream_info->stream_rings[cur_stream] = NULL;
 777		}
 778	}
 779	xhci_free_command(xhci, stream_info->free_streams_command);
 780	xhci->cmd_ring_reserved_trbs--;
 781	if (stream_info->stream_ctx_array)
 782		xhci_free_stream_ctx(xhci,
 783				stream_info->num_stream_ctxs,
 784				stream_info->stream_ctx_array,
 785				stream_info->ctx_array_dma);
 786
 787	kfree(stream_info->stream_rings);
 788	kfree(stream_info);
 789}
 790
 791
 792/***************** Device context manipulation *************************/
 793
 794static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
 795		struct xhci_virt_ep *ep)
 796{
 797	timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
 798		    0);
 799	ep->xhci = xhci;
 800}
 801
 802static void xhci_free_tt_info(struct xhci_hcd *xhci,
 803		struct xhci_virt_device *virt_dev,
 804		int slot_id)
 805{
 806	struct list_head *tt_list_head;
 807	struct xhci_tt_bw_info *tt_info, *next;
 808	bool slot_found = false;
 809
 810	/* If the device never made it past the Set Address stage,
 811	 * it may not have the real_port set correctly.
 812	 */
 813	if (virt_dev->real_port == 0 ||
 814			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
 815		xhci_dbg(xhci, "Bad real port.\n");
 816		return;
 817	}
 818
 819	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
 820	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
 821		/* Multi-TT hubs will have more than one entry */
 822		if (tt_info->slot_id == slot_id) {
 823			slot_found = true;
 824			list_del(&tt_info->tt_list);
 825			kfree(tt_info);
 826		} else if (slot_found) {
 827			break;
 828		}
 829	}
 830}
 831
 832int xhci_alloc_tt_info(struct xhci_hcd *xhci,
 833		struct xhci_virt_device *virt_dev,
 834		struct usb_device *hdev,
 835		struct usb_tt *tt, gfp_t mem_flags)
 836{
 837	struct xhci_tt_bw_info		*tt_info;
 838	unsigned int			num_ports;
 839	int				i, j;
 840	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 841
 842	if (!tt->multi)
 843		num_ports = 1;
 844	else
 845		num_ports = hdev->maxchild;
 846
 847	for (i = 0; i < num_ports; i++, tt_info++) {
 848		struct xhci_interval_bw_table *bw_table;
 849
 850		tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
 851				dev_to_node(dev));
 852		if (!tt_info)
 853			goto free_tts;
 854		INIT_LIST_HEAD(&tt_info->tt_list);
 855		list_add(&tt_info->tt_list,
 856				&xhci->rh_bw[virt_dev->real_port - 1].tts);
 857		tt_info->slot_id = virt_dev->udev->slot_id;
 858		if (tt->multi)
 859			tt_info->ttport = i+1;
 860		bw_table = &tt_info->bw_table;
 861		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
 862			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
 863	}
 864	return 0;
 865
 866free_tts:
 867	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
 868	return -ENOMEM;
 869}
 870
 871
 872/* All the xhci_tds in the ring's TD list should be freed at this point.
 873 * Should be called with xhci->lock held if there is any chance the TT lists
 874 * will be manipulated by the configure endpoint, allocate device, or update
 875 * hub functions while this function is removing the TT entries from the list.
 876 */
 877void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 878{
 879	struct xhci_virt_device *dev;
 880	int i;
 881	int old_active_eps = 0;
 882
 883	/* Slot ID 0 is reserved */
 884	if (slot_id == 0 || !xhci->devs[slot_id])
 885		return;
 886
 887	dev = xhci->devs[slot_id];
 888
 889	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
 890	if (!dev)
 891		return;
 892
 893	trace_xhci_free_virt_device(dev);
 894
 895	if (dev->tt_info)
 896		old_active_eps = dev->tt_info->active_eps;
 897
 898	for (i = 0; i < 31; i++) {
 899		if (dev->eps[i].ring)
 900			xhci_ring_free(xhci, dev->eps[i].ring);
 901		if (dev->eps[i].stream_info)
 902			xhci_free_stream_info(xhci,
 903					dev->eps[i].stream_info);
 904		/* Endpoints on the TT/root port lists should have been removed
 905		 * when usb_disable_device() was called for the device.
 906		 * We can't drop them anyway, because the udev might have gone
 907		 * away by this point, and we can't tell what speed it was.
 908		 */
 909		if (!list_empty(&dev->eps[i].bw_endpoint_list))
 910			xhci_warn(xhci, "Slot %u endpoint %u "
 911					"not removed from BW list!\n",
 912					slot_id, i);
 913	}
 914	/* If this is a hub, free the TT(s) from the TT list */
 915	xhci_free_tt_info(xhci, dev, slot_id);
 916	/* If necessary, update the number of active TTs on this root port */
 917	xhci_update_tt_active_eps(xhci, dev, old_active_eps);
 918
 919	if (dev->in_ctx)
 920		xhci_free_container_ctx(xhci, dev->in_ctx);
 921	if (dev->out_ctx)
 922		xhci_free_container_ctx(xhci, dev->out_ctx);
 923
 924	if (dev->udev && dev->udev->slot_id)
 925		dev->udev->slot_id = 0;
 926	kfree(xhci->devs[slot_id]);
 927	xhci->devs[slot_id] = NULL;
 928}
 929
 930/*
 931 * Free a virt_device structure.
 932 * If the virt_device added a tt_info (a hub) and has children pointing to
 933 * that tt_info, then free the child first. Recursive.
 934 * We can't rely on udev at this point to find child-parent relationships.
 935 */
 936static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
 937{
 938	struct xhci_virt_device *vdev;
 939	struct list_head *tt_list_head;
 940	struct xhci_tt_bw_info *tt_info, *next;
 941	int i;
 942
 943	vdev = xhci->devs[slot_id];
 944	if (!vdev)
 945		return;
 946
 947	if (vdev->real_port == 0 ||
 948			vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
 949		xhci_dbg(xhci, "Bad vdev->real_port.\n");
 950		goto out;
 951	}
 952
 953	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
 954	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
 955		/* is this a hub device that added a tt_info to the tts list? */
 956		if (tt_info->slot_id == slot_id) {
 957			/* are any devices using this tt_info? */
 958			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
 959				vdev = xhci->devs[i];
 960				if (vdev && (vdev->tt_info == tt_info))
 961					xhci_free_virt_devices_depth_first(
 962						xhci, i);
 963			}
 964		}
 965	}
 966out:
 967	/* we are now at a leaf device */
 968	xhci_debugfs_remove_slot(xhci, slot_id);
 969	xhci_free_virt_device(xhci, slot_id);
 970}
 971
 972int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 973		struct usb_device *udev, gfp_t flags)
 974{
 975	struct xhci_virt_device *dev;
 976	int i;
 977
 978	/* Slot ID 0 is reserved */
 979	if (slot_id == 0 || xhci->devs[slot_id]) {
 980		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
 981		return 0;
 982	}
 983
 984	dev = kzalloc(sizeof(*dev), flags);
 985	if (!dev)
 986		return 0;
 987
 988	/* Allocate the (output) device context that will be used in the HC. */
 989	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
 990	if (!dev->out_ctx)
 991		goto fail;
 992
 993	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
 994			(unsigned long long)dev->out_ctx->dma);
 995
 996	/* Allocate the (input) device context for address device command */
 997	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
 998	if (!dev->in_ctx)
 999		goto fail;
1000
1001	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
1002			(unsigned long long)dev->in_ctx->dma);
1003
1004	/* Initialize the cancellation list and watchdog timers for each ep */
1005	for (i = 0; i < 31; i++) {
1006		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
1007		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
1008		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
1009	}
1010
1011	/* Allocate endpoint 0 ring */
1012	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
1013	if (!dev->eps[0].ring)
1014		goto fail;
1015
1016	dev->udev = udev;
1017
1018	/* Point to output device context in dcbaa. */
1019	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
1020	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
1021		 slot_id,
1022		 &xhci->dcbaa->dev_context_ptrs[slot_id],
1023		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
1024
1025	trace_xhci_alloc_virt_device(dev);
1026
1027	xhci->devs[slot_id] = dev;
1028
1029	return 1;
1030fail:
1031
1032	if (dev->in_ctx)
1033		xhci_free_container_ctx(xhci, dev->in_ctx);
1034	if (dev->out_ctx)
1035		xhci_free_container_ctx(xhci, dev->out_ctx);
1036	kfree(dev);
1037
1038	return 0;
1039}
1040
1041void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
1042		struct usb_device *udev)
1043{
1044	struct xhci_virt_device *virt_dev;
1045	struct xhci_ep_ctx	*ep0_ctx;
1046	struct xhci_ring	*ep_ring;
1047
1048	virt_dev = xhci->devs[udev->slot_id];
1049	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
1050	ep_ring = virt_dev->eps[0].ring;
1051	/*
1052	 * FIXME we don't keep track of the dequeue pointer very well after a
1053	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
1054	 * host to our enqueue pointer.  This should only be called after a
1055	 * configured device has reset, so all control transfers should have
1056	 * been completed or cancelled before the reset.
1057	 */
1058	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
1059							ep_ring->enqueue)
1060				   | ep_ring->cycle_state);
1061}
1062
1063/*
1064 * The xHCI roothub may have ports of differing speeds in any order in the port
1065 * status registers.
1066 *
1067 * The xHCI hardware wants to know the roothub port number that the USB device
1068 * is attached to (or the roothub port its ancestor hub is attached to).  All we
1069 * know is the index of that port under either the USB 2.0 or the USB 3.0
1070 * roothub, but that doesn't give us the real index into the HW port status
1071 * registers. Call xhci_find_raw_port_number() to get real index.
1072 */
1073static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
1074		struct usb_device *udev)
1075{
1076	struct usb_device *top_dev;
1077	struct usb_hcd *hcd;
1078
1079	if (udev->speed >= USB_SPEED_SUPER)
1080		hcd = xhci->shared_hcd;
1081	else
1082		hcd = xhci->main_hcd;
1083
1084	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1085			top_dev = top_dev->parent)
1086		/* Found device below root hub */;
1087
1088	return	xhci_find_raw_port_number(hcd, top_dev->portnum);
1089}
1090
1091/* Setup an xHCI virtual device for a Set Address command */
1092int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
1093{
1094	struct xhci_virt_device *dev;
1095	struct xhci_ep_ctx	*ep0_ctx;
1096	struct xhci_slot_ctx    *slot_ctx;
1097	u32			port_num;
1098	u32			max_packets;
1099	struct usb_device *top_dev;
1100
1101	dev = xhci->devs[udev->slot_id];
1102	/* Slot ID 0 is reserved */
1103	if (udev->slot_id == 0 || !dev) {
1104		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
1105				udev->slot_id);
1106		return -EINVAL;
1107	}
1108	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
1109	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
1110
1111	/* 3) Only the control endpoint is valid - one endpoint context */
1112	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
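	/*
	 * Note: udev->route is the USB 3.x route string (four bits per
	 * hub tier, up to five tiers), and LAST_CTX(1) marks EP0 as the
	 * only valid endpoint context at Set Address time.
	 */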
1113	switch (udev->speed) {
1114	case USB_SPEED_SUPER_PLUS:
1115		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
1116		max_packets = MAX_PACKET(512);
1117		break;
1118	case USB_SPEED_SUPER:
1119		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
1120		max_packets = MAX_PACKET(512);
1121		break;
1122	case USB_SPEED_HIGH:
1123		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
1124		max_packets = MAX_PACKET(64);
1125		break;
1126	/* USB core guesses at a 64-byte max packet first for FS devices */
1127	case USB_SPEED_FULL:
1128		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
1129		max_packets = MAX_PACKET(64);
1130		break;
1131	case USB_SPEED_LOW:
1132		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
1133		max_packets = MAX_PACKET(8);
1134		break;
1135	case USB_SPEED_WIRELESS:
1136		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
1137		return -EINVAL;
1139	default:
1140		/* Speed was set earlier, this shouldn't happen. */
1141		return -EINVAL;
1142	}
1143	/* Find the root hub port this device is under */
1144	port_num = xhci_find_real_port_number(xhci, udev);
1145	if (!port_num)
1146		return -EINVAL;
1147	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
1148	/* Set the port number in the virtual_device to the faked port number */
1149	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1150			top_dev = top_dev->parent)
1151		/* Found device below root hub */;
1152	dev->fake_port = top_dev->portnum;
1153	dev->real_port = port_num;
1154	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
1155	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
1156
1157	/* Find the right bandwidth table that this device will be a part of.
1158	 * If this is a full speed device attached directly to a root port (or a
1159	 * descendant of one), it counts as a primary bandwidth domain, not a
1160	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
1161	 * will never be created for the HS root hub.
1162	 */
1163	if (!udev->tt || !udev->tt->hub->parent) {
1164		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
1165	} else {
1166		struct xhci_root_port_bw_info *rh_bw;
1167		struct xhci_tt_bw_info *tt_bw;
1168
1169		rh_bw = &xhci->rh_bw[port_num - 1];
1170		/* Find the right TT. */
1171		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
1172			if (tt_bw->slot_id != udev->tt->hub->slot_id)
1173				continue;
1174
1175			if (!dev->udev->tt->multi ||
1176					(udev->tt->multi &&
1177					 tt_bw->ttport == dev->udev->ttport)) {
1178				dev->bw_table = &tt_bw->bw_table;
1179				dev->tt_info = tt_bw;
1180				break;
1181			}
1182		}
1183		if (!dev->tt_info)
1184			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
1185	}
1186
1187	/* Is this a LS/FS device under an external HS hub? */
1188	if (udev->tt && udev->tt->hub->parent) {
1189		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
1190						(udev->ttport << 8));
1191		if (udev->tt->multi)
1192			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
1193	}
1194	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
1195	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
1196
1197	/* Step 4 - ring already allocated */
1198	/* Step 5 */
1199	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
1200
1201	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
1202	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
1203					 max_packets);
1204
1205	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
1206				   dev->eps[0].ring->cycle_state);
1207
1208	trace_xhci_setup_addressable_virt_device(dev);
1209
1210	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
1211
1212	return 0;
1213}
1214
1215/*
1216 * Convert interval expressed as 2^(bInterval - 1) == interval into
1217 * straight exponent value 2^n == interval.
1218 *
1219 */
1220static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1221		struct usb_host_endpoint *ep)
1222{
1223	unsigned int interval;
1224
1225	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
1226	if (interval != ep->desc.bInterval - 1)
1227		dev_warn(&udev->dev,
1228			 "ep %#x - rounding interval to %d %sframes\n",
1229			 ep->desc.bEndpointAddress,
1230			 1 << interval,
1231			 udev->speed == USB_SPEED_FULL ? "" : "micro");
1232
1233	if (udev->speed == USB_SPEED_FULL) {
1234		/*
1235		 * Full speed isoc endpoints specify interval in frames,
1236		 * not microframes. We are using microframes everywhere,
1237		 * so adjust accordingly.
1238		 */
1239		interval += 3;	/* 1 frame = 2^3 uframes */
1240	}
1241
1242	return interval;
1243}
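
/*
 * Worked example: a SuperSpeed interrupt endpoint with bInterval == 4
 * yields interval == 3, i.e. the xHC services it every
 * 2^3 * 125us == 1ms.  The same bInterval on a full-speed isoc endpoint
 * becomes 3 + 3 == 6, since 2^3 frames == 2^6 microframes.
 */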
1244
1245/*
1246 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
1247 * microframes, rounded down to nearest power of 2.
1248 */
1249static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1250		struct usb_host_endpoint *ep, unsigned int desc_interval,
1251		unsigned int min_exponent, unsigned int max_exponent)
1252{
1253	unsigned int interval;
1254
1255	interval = fls(desc_interval) - 1;
1256	interval = clamp_val(interval, min_exponent, max_exponent);
1257	if ((1 << interval) != desc_interval)
1258		dev_dbg(&udev->dev,
1259			 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1260			 ep->desc.bEndpointAddress,
1261			 1 << interval,
1262			 desc_interval);
1263
1264	return interval;
1265}
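
/*
 * Worked example: desc_interval == 9 microframes gives fls(9) - 1 == 3,
 * so the interval is rounded down to 2^3 == 8 microframes and the
 * mismatch is logged with dev_dbg().
 */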
1266
1267static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1268		struct usb_host_endpoint *ep)
1269{
1270	if (ep->desc.bInterval == 0)
1271		return 0;
1272	return xhci_microframes_to_exponent(udev, ep,
1273			ep->desc.bInterval, 0, 15);
1274}
1275
1276
1277static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1278		struct usb_host_endpoint *ep)
1279{
1280	return xhci_microframes_to_exponent(udev, ep,
1281			ep->desc.bInterval * 8, 3, 10);
1282}
1283
1284/* Return the polling or NAK interval.
1285 *
1286 * The polling interval is expressed in "microframes".  If xHCI's Interval field
1287 * is set to N, it will service the endpoint every 2^(Interval)*125us.
1288 *
1289 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
1290 * is set to 0.
1291 */
1292static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1293		struct usb_host_endpoint *ep)
1294{
1295	unsigned int interval = 0;
1296
1297	switch (udev->speed) {
1298	case USB_SPEED_HIGH:
1299		/* Max NAK rate */
1300		if (usb_endpoint_xfer_control(&ep->desc) ||
1301		    usb_endpoint_xfer_bulk(&ep->desc)) {
1302			interval = xhci_parse_microframe_interval(udev, ep);
1303			break;
1304		}
1305		/* Fall through - SS and HS isoc/int have same decoding */
1306
1307	case USB_SPEED_SUPER_PLUS:
1308	case USB_SPEED_SUPER:
1309		if (usb_endpoint_xfer_int(&ep->desc) ||
1310		    usb_endpoint_xfer_isoc(&ep->desc)) {
1311			interval = xhci_parse_exponent_interval(udev, ep);
1312		}
1313		break;
1314
1315	case USB_SPEED_FULL:
1316		if (usb_endpoint_xfer_isoc(&ep->desc)) {
1317			interval = xhci_parse_exponent_interval(udev, ep);
1318			break;
1319		}
1320		/*
1321		 * Fall through for interrupt endpoint interval decoding
1322		 * since it uses the same rules as low speed interrupt
1323		 * endpoints.
1324		 */
1325		/* fall through */
1326
1327	case USB_SPEED_LOW:
1328		if (usb_endpoint_xfer_int(&ep->desc) ||
1329		    usb_endpoint_xfer_isoc(&ep->desc)) {
1330
1331			interval = xhci_parse_frame_interval(udev, ep);
1332		}
1333		break;
1334
1335	default:
1336		BUG();
1337	}
1338	return interval;
1339}
1340
1341/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
1342 * High speed endpoint descriptors can define "the number of additional
1343 * transaction opportunities per microframe", but that goes in the Max Burst
1344 * endpoint context field.
1345 */
1346static u32 xhci_get_endpoint_mult(struct usb_device *udev,
1347		struct usb_host_endpoint *ep)
1348{
1349	if (udev->speed < USB_SPEED_SUPER ||
1350			!usb_endpoint_xfer_isoc(&ep->desc))
1351		return 0;
1352	return ep->ss_ep_comp.bmAttributes;
1353}
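
/*
 * Note: for a SuperSpeed isoc endpoint, bits 1:0 of bmAttributes hold
 * the zero-based Mult value, so an endpoint reporting Mult == 2 may
 * move up to (Mult + 1) * (bMaxBurst + 1) packets per service interval;
 * the raw value is packed into the context via EP_MULT().
 */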
1354
1355static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
1356				       struct usb_host_endpoint *ep)
1357{
1358	/* Super speed and Plus have max burst in ep companion desc */
1359	if (udev->speed >= USB_SPEED_SUPER)
1360		return ep->ss_ep_comp.bMaxBurst;
1361
1362	if (udev->speed == USB_SPEED_HIGH &&
1363	    (usb_endpoint_xfer_isoc(&ep->desc) ||
1364	     usb_endpoint_xfer_int(&ep->desc)))
1365		return usb_endpoint_maxp_mult(&ep->desc) - 1;
1366
1367	return 0;
1368}
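
/*
 * Worked example: a high-bandwidth high-speed isoc endpoint advertising
 * 3 transactions per microframe has usb_endpoint_maxp_mult() == 3, so
 * the zero-based Max Burst field becomes 2.
 */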
1369
1370static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
1371{
1372	int in;
1373
1374	in = usb_endpoint_dir_in(&ep->desc);
1375
1376	switch (usb_endpoint_type(&ep->desc)) {
1377	case USB_ENDPOINT_XFER_CONTROL:
1378		return CTRL_EP;
1379	case USB_ENDPOINT_XFER_BULK:
1380		return in ? BULK_IN_EP : BULK_OUT_EP;
1381	case USB_ENDPOINT_XFER_ISOC:
1382		return in ? ISOC_IN_EP : ISOC_OUT_EP;
1383	case USB_ENDPOINT_XFER_INT:
1384		return in ? INT_IN_EP : INT_OUT_EP;
1385	}
1386	return 0;
1387}
1388
1389/* Return the maximum endpoint service interval time (ESIT) payload.
1390 * Basically, this is the maxpacket size, multiplied by the burst size
1391 * and mult size.
1392 */
1393static u32 xhci_get_max_esit_payload(struct usb_device *udev,
1394		struct usb_host_endpoint *ep)
1395{
1396	int max_burst;
1397	int max_packet;
1398
1399	/* Only applies for interrupt or isochronous endpoints */
1400	if (usb_endpoint_xfer_control(&ep->desc) ||
1401			usb_endpoint_xfer_bulk(&ep->desc))
1402		return 0;
1403
1404	/* SuperSpeedPlus Isoc ep sending over 48k per esit */
1405	if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
1406	    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
1407		return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
1408	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
1409	else if (udev->speed >= USB_SPEED_SUPER)
1410		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
1411
1412	max_packet = usb_endpoint_maxp(&ep->desc);
1413	max_burst = usb_endpoint_maxp_mult(&ep->desc);
1414	/* usb_endpoint_maxp_mult() is one-based; 1 means one transfer per ESIT */
1415	return max_packet * max_burst;
1416}
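
/*
 * Worked example: a high-speed isoc endpoint with wMaxPacketSize == 1024
 * and 3 transactions per microframe yields 1024 * 3 == 3072 bytes as
 * its max ESIT payload.
 */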
1417
1418/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
1419 * Drivers will have to call usb_alloc_streams() to do that.
1420 */
1421int xhci_endpoint_init(struct xhci_hcd *xhci,
1422		struct xhci_virt_device *virt_dev,
1423		struct usb_device *udev,
1424		struct usb_host_endpoint *ep,
1425		gfp_t mem_flags)
1426{
1427	unsigned int ep_index;
1428	struct xhci_ep_ctx *ep_ctx;
1429	struct xhci_ring *ep_ring;
1430	unsigned int max_packet;
1431	enum xhci_ring_type ring_type;
1432	u32 max_esit_payload;
1433	u32 endpoint_type;
1434	unsigned int max_burst;
1435	unsigned int interval;
1436	unsigned int mult;
1437	unsigned int avg_trb_len;
1438	unsigned int err_count = 0;
1439
1440	ep_index = xhci_get_endpoint_index(&ep->desc);
1441	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1442
1443	endpoint_type = xhci_get_endpoint_type(ep);
1444	if (!endpoint_type)
1445		return -EINVAL;
1446
1447	ring_type = usb_endpoint_type(&ep->desc);
1448
1449	/*
1450	 * Get values to fill the endpoint context, mostly from ep descriptor.
1451	 * The average TRB buffer length for bulk endpoints is unclear as we
1452	 * have no clue on scatter gather list entry size. For Isoc and Int,
1453	 * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
1454	 */
1455	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
1456	interval = xhci_get_endpoint_interval(udev, ep);
1457
1458	/* Periodic endpoint bInterval limit quirk */
1459	if (usb_endpoint_xfer_int(&ep->desc) ||
1460	    usb_endpoint_xfer_isoc(&ep->desc)) {
1461		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
1462		    udev->speed >= USB_SPEED_HIGH &&
1463		    interval >= 7) {
1464			interval = 6;
1465		}
1466	}
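	/*
	 * Note: the quirk above clamps a periodic service interval of
	 * 2^7 * 125us == 16ms (or longer) down to 2^6 * 125us == 8ms on
	 * affected hosts.
	 */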
1467
1468	mult = xhci_get_endpoint_mult(udev, ep);
1469	max_packet = usb_endpoint_maxp(&ep->desc);
1470	max_burst = xhci_get_endpoint_max_burst(udev, ep);
1471	avg_trb_len = max_esit_payload;
1472
1473	/* FIXME dig Mult and streams info out of ep companion desc */
1474
1475	/* Allow 3 retries for everything but isoc, set CErr = 3 */
1476	if (!usb_endpoint_xfer_isoc(&ep->desc))
1477		err_count = 3;
1478	/* Some devices get this wrong */
1479	if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
1480		max_packet = 512;
1481	/* xHCI 1.0 and 1.1 indicate that ctrl ep avg TRB Length should be 8 */
1482	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
1483		avg_trb_len = 8;
1484	/* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
1485	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
1486		mult = 0;
1487
1488	/* Set up the endpoint ring */
1489	virt_dev->eps[ep_index].new_ring =
1490		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
1491	if (!virt_dev->eps[ep_index].new_ring)
1492		return -ENOMEM;
1493
1494	virt_dev->eps[ep_index].skip = false;
1495	ep_ring = virt_dev->eps[ep_index].new_ring;
1496
1497	/* Fill the endpoint context */
1498	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
1499				      EP_INTERVAL(interval) |
1500				      EP_MULT(mult));
1501	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
1502				       MAX_PACKET(max_packet) |
1503				       MAX_BURST(max_burst) |
1504				       ERROR_COUNT(err_count));
1505	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
1506				  ep_ring->cycle_state);
1507
1508	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
1509				      EP_AVG_TRB_LENGTH(avg_trb_len));
1510
1511	return 0;
1512}
1513
1514void xhci_endpoint_zero(struct xhci_hcd *xhci,
1515		struct xhci_virt_device *virt_dev,
1516		struct usb_host_endpoint *ep)
1517{
1518	unsigned int ep_index;
1519	struct xhci_ep_ctx *ep_ctx;
1520
1521	ep_index = xhci_get_endpoint_index(&ep->desc);
1522	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1523
1524	ep_ctx->ep_info = 0;
1525	ep_ctx->ep_info2 = 0;
1526	ep_ctx->deq = 0;
1527	ep_ctx->tx_info = 0;
1528	/* Don't free the endpoint ring until the set interface or configuration
1529	 * request succeeds.
1530	 */
1531}
1532
1533void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
1534{
1535	bw_info->ep_interval = 0;
1536	bw_info->mult = 0;
1537	bw_info->num_packets = 0;
1538	bw_info->max_packet_size = 0;
1539	bw_info->type = 0;
1540	bw_info->max_esit_payload = 0;
1541}
1542
1543void xhci_update_bw_info(struct xhci_hcd *xhci,
1544		struct xhci_container_ctx *in_ctx,
1545		struct xhci_input_control_ctx *ctrl_ctx,
1546		struct xhci_virt_device *virt_dev)
1547{
1548	struct xhci_bw_info *bw_info;
1549	struct xhci_ep_ctx *ep_ctx;
1550	unsigned int ep_type;
1551	int i;
1552
1553	for (i = 1; i < 31; i++) {
1554		bw_info = &virt_dev->eps[i].bw_info;
1555
1556		/* We can't tell what endpoint type is being dropped, but
1557		 * unconditionally clearing the bandwidth info for non-periodic
1558		 * endpoints should be harmless because the info will never be
1559		 * set in the first place.
1560		 */
1561		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1562			/* Dropped endpoint */
1563			xhci_clear_endpoint_bw_info(bw_info);
1564			continue;
1565		}
1566
1567		if (EP_IS_ADDED(ctrl_ctx, i)) {
1568			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1569			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1570
1571			/* Ignore non-periodic endpoints */
1572			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1573					ep_type != ISOC_IN_EP &&
1574					ep_type != INT_IN_EP)
1575				continue;
1576
1577			/* Added or changed endpoint */
1578			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1579					le32_to_cpu(ep_ctx->ep_info));
1580			/* Number of packets and mult are zero-based in the
1581			 * input context, but we want one-based for the
1582			 * interval table.
1583			 */
1584			bw_info->mult = CTX_TO_EP_MULT(
1585					le32_to_cpu(ep_ctx->ep_info)) + 1;
1586			bw_info->num_packets = CTX_TO_MAX_BURST(
1587					le32_to_cpu(ep_ctx->ep_info2)) + 1;
1588			bw_info->max_packet_size = MAX_PACKET_DECODED(
1589					le32_to_cpu(ep_ctx->ep_info2));
1590			bw_info->type = ep_type;
1591			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1592					le32_to_cpu(ep_ctx->tx_info));
1593		}
1594	}
1595}
1596
1597/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
1598 * Useful when you want to change one particular aspect of the endpoint and then
1599 * issue a configure endpoint command.
1600 */
1601void xhci_endpoint_copy(struct xhci_hcd *xhci,
1602		struct xhci_container_ctx *in_ctx,
1603		struct xhci_container_ctx *out_ctx,
1604		unsigned int ep_index)
1605{
1606	struct xhci_ep_ctx *out_ep_ctx;
1607	struct xhci_ep_ctx *in_ep_ctx;
1608
1609	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1610	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1611
1612	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
1613	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1614	in_ep_ctx->deq = out_ep_ctx->deq;
1615	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1616	if (xhci->quirks & XHCI_MTK_HOST) {
1617		in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
1618		in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
1619	}
1620}
1621
1622/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
1623 * Useful when you want to change one particular aspect of the slot context and then
1624 * issue a configure endpoint command.  Only the context entries field matters,
1625 * but we'll copy the whole thing anyway.
1626 */
1627void xhci_slot_copy(struct xhci_hcd *xhci,
1628		struct xhci_container_ctx *in_ctx,
1629		struct xhci_container_ctx *out_ctx)
1630{
1631	struct xhci_slot_ctx *in_slot_ctx;
1632	struct xhci_slot_ctx *out_slot_ctx;
1633
1634	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1635	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
1636
1637	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
1638	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
1639	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
1640	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
1641}
1642
1643/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
1644static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1645{
1646	int i;
1647	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1648	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1649
1650	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1651			"Allocating %d scratchpad buffers", num_sp);
1652
1653	if (!num_sp)
1654		return 0;
1655
1656	xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
1657				dev_to_node(dev));
1658	if (!xhci->scratchpad)
1659		goto fail_sp;
1660
1661	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
1662				     num_sp * sizeof(u64),
1663				     &xhci->scratchpad->sp_dma, flags);
1664	if (!xhci->scratchpad->sp_array)
1665		goto fail_sp2;
1666
1667	xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
1668					flags, dev_to_node(dev));
1669	if (!xhci->scratchpad->sp_buffers)
1670		goto fail_sp3;
1671
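	/*
	 * Note: per xHCI spec section 4.20, DCBAA entry 0 is reserved for
	 * the scratchpad buffer array pointer rather than a device
	 * context, which is why slot 0 is written below.
	 */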
1672	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1673	for (i = 0; i < num_sp; i++) {
1674		dma_addr_t dma;
1675		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1676					       flags);
1677		if (!buf)
1678			goto fail_sp4;
1679
1680		xhci->scratchpad->sp_array[i] = dma;
1681		xhci->scratchpad->sp_buffers[i] = buf;
1682	}
1683
1684	return 0;
1685
1686 fail_sp4:
1687	for (i = i - 1; i >= 0; i--) {
1688		dma_free_coherent(dev, xhci->page_size,
1689				    xhci->scratchpad->sp_buffers[i],
1690				    xhci->scratchpad->sp_array[i]);
1691	}
1692
1693	kfree(xhci->scratchpad->sp_buffers);
1694
1695 fail_sp3:
1696	dma_free_coherent(dev, num_sp * sizeof(u64),
1697			    xhci->scratchpad->sp_array,
1698			    xhci->scratchpad->sp_dma);
1699
1700 fail_sp2:
1701	kfree(xhci->scratchpad);
1702	xhci->scratchpad = NULL;
1703
1704 fail_sp:
1705	return -ENOMEM;
1706}
1707
1708static void scratchpad_free(struct xhci_hcd *xhci)
1709{
1710	int num_sp;
1711	int i;
1712	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1713
1714	if (!xhci->scratchpad)
1715		return;
1716
1717	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1718
1719	for (i = 0; i < num_sp; i++) {
1720		dma_free_coherent(dev, xhci->page_size,
1721				    xhci->scratchpad->sp_buffers[i],
1722				    xhci->scratchpad->sp_array[i]);
1723	}
1724	kfree(xhci->scratchpad->sp_buffers);
1725	dma_free_coherent(dev, num_sp * sizeof(u64),
1726			    xhci->scratchpad->sp_array,
1727			    xhci->scratchpad->sp_dma);
1728	kfree(xhci->scratchpad);
1729	xhci->scratchpad = NULL;
1730}
1731
1732struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1733		bool allocate_completion, gfp_t mem_flags)
1734{
1735	struct xhci_command *command;
1736	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1737
1738	command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
1739	if (!command)
1740		return NULL;
1741
1742	if (allocate_completion) {
1743		command->completion =
1744			kzalloc_node(sizeof(struct completion), mem_flags,
1745				dev_to_node(dev));
1746		if (!command->completion) {
1747			kfree(command);
1748			return NULL;
1749		}
1750		init_completion(command->completion);
1751	}
1752
1753	command->status = 0;
1754	INIT_LIST_HEAD(&command->cmd_list);
1755	return command;
1756}
1757
1758struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
1759		bool allocate_completion, gfp_t mem_flags)
1760{
1761	struct xhci_command *command;
1762
1763	command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
1764	if (!command)
1765		return NULL;
1766
1767	command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
1768						   mem_flags);
1769	if (!command->in_ctx) {
1770		kfree(command->completion);
1771		kfree(command);
1772		return NULL;
1773	}
1774	return command;
1775}
1776
1777void xhci_urb_free_priv(struct urb_priv *urb_priv)
1778{
1779	kfree(urb_priv);
1780}
1781
1782void xhci_free_command(struct xhci_hcd *xhci,
1783		struct xhci_command *command)
1784{
1785	xhci_free_container_ctx(xhci,
1786			command->in_ctx);
1787	kfree(command->completion);
1788	kfree(command);
1789}
1790
1791int xhci_alloc_erst(struct xhci_hcd *xhci,
1792		    struct xhci_ring *evt_ring,
1793		    struct xhci_erst *erst,
1794		    gfp_t flags)
1795{
1796	size_t size;
1797	unsigned int val;
1798	struct xhci_segment *seg;
1799	struct xhci_erst_entry *entry;
1800
1801	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
1802	erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
1803					   size, &erst->erst_dma_addr, flags);
1804	if (!erst->entries)
1805		return -ENOMEM;
1806
1807	erst->num_entries = evt_ring->num_segs;
1808
1809	seg = evt_ring->first_seg;
1810	for (val = 0; val < evt_ring->num_segs; val++) {
1811		entry = &erst->entries[val];
1812		entry->seg_addr = cpu_to_le64(seg->dma);
1813		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
1814		entry->rsvd = 0;
1815		seg = seg->next;
1816	}
1817
1818	return 0;
1819}
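
/*
 * Note: each ERST entry simply pairs a segment's base DMA address with
 * its size in TRBs, letting the xHC map event ring positions back to
 * segments; xhci_mem_init() builds this table for its
 * ERST_NUM_SEGS-segment event ring.
 */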
1820
1821void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
1822{
1823	size_t size;
1824	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1825
1826	size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
1827	if (erst->entries)
1828		dma_free_coherent(dev, size,
1829				erst->entries,
1830				erst->erst_dma_addr);
1831	erst->entries = NULL;
1832}
1833
1834void xhci_mem_cleanup(struct xhci_hcd *xhci)
1835{
1836	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
1837	int i, j, num_ports;
1838
1839	cancel_delayed_work_sync(&xhci->cmd_timer);
1840
1841	xhci_free_erst(xhci, &xhci->erst);
1842
1843	if (xhci->event_ring)
1844		xhci_ring_free(xhci, xhci->event_ring);
1845	xhci->event_ring = NULL;
1846	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
1847
1848	if (xhci->lpm_command)
1849		xhci_free_command(xhci, xhci->lpm_command);
1850	xhci->lpm_command = NULL;
1851	if (xhci->cmd_ring)
1852		xhci_ring_free(xhci, xhci->cmd_ring);
1853	xhci->cmd_ring = NULL;
1854	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
1855	xhci_cleanup_command_queue(xhci);
1856
1857	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1858	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
1859		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1860		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1861			struct list_head *ep = &bwt->interval_bw[j].endpoints;
1862			while (!list_empty(ep))
1863				list_del_init(ep->next);
1864		}
1865	}
1866
1867	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
1868		xhci_free_virt_devices_depth_first(xhci, i);
1869
1870	dma_pool_destroy(xhci->segment_pool);
1871	xhci->segment_pool = NULL;
1872	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
1873
1874	dma_pool_destroy(xhci->device_pool);
1875	xhci->device_pool = NULL;
1876	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
1877
1878	dma_pool_destroy(xhci->small_streams_pool);
1879	xhci->small_streams_pool = NULL;
1880	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1881			"Freed small stream array pool");
1882
1883	dma_pool_destroy(xhci->medium_streams_pool);
1884	xhci->medium_streams_pool = NULL;
1885	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1886			"Freed medium stream array pool");
1887
1888	if (xhci->dcbaa)
1889		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
1890				xhci->dcbaa, xhci->dcbaa->dma);
1891	xhci->dcbaa = NULL;
1892
1893	scratchpad_free(xhci);
1894
1895	if (!xhci->rh_bw)
1896		goto no_bw;
1897
1898	for (i = 0; i < num_ports; i++) {
1899		struct xhci_tt_bw_info *tt, *n;
1900		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1901			list_del(&tt->tt_list);
1902			kfree(tt);
1903		}
1904	}
1905
1906no_bw:
1907	xhci->cmd_ring_reserved_trbs = 0;
1908	xhci->usb2_rhub.num_ports = 0;
1909	xhci->usb3_rhub.num_ports = 0;
1910	xhci->num_active_eps = 0;
1911	kfree(xhci->usb2_rhub.ports);
1912	kfree(xhci->usb3_rhub.ports);
1913	kfree(xhci->hw_ports);
1914	kfree(xhci->rh_bw);
1915	kfree(xhci->ext_caps);
	kfree(xhci->usb2_rhub.psi);
	kfree(xhci->usb3_rhub.psi);
1916
1917	xhci->usb2_rhub.ports = NULL;
1918	xhci->usb3_rhub.ports = NULL;
1919	xhci->hw_ports = NULL;
1920	xhci->rh_bw = NULL;
1921	xhci->ext_caps = NULL;
	xhci->usb2_rhub.psi = NULL;
	xhci->usb3_rhub.psi = NULL;
1922
1923	xhci->page_size = 0;
1924	xhci->page_shift = 0;
1925	xhci->usb2_rhub.bus_state.bus_suspended = 0;
1926	xhci->usb3_rhub.bus_state.bus_suspended = 0;
1927}
1928
1929static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
1930		struct xhci_segment *input_seg,
1931		union xhci_trb *start_trb,
1932		union xhci_trb *end_trb,
1933		dma_addr_t input_dma,
1934		struct xhci_segment *result_seg,
1935		char *test_name, int test_number)
1936{
1937	unsigned long long start_dma;
1938	unsigned long long end_dma;
1939	struct xhci_segment *seg;
1940
1941	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
1942	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
1943
1944	seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
1945	if (seg != result_seg) {
1946		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
1947				test_name, test_number);
1948		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
1949				"input DMA 0x%llx\n",
1950				input_seg,
1951				(unsigned long long) input_dma);
1952		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
1953				"ending TRB %p (0x%llx DMA)\n",
1954				start_trb, start_dma,
1955				end_trb, end_dma);
1956		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
1957				result_seg, seg);
1958		trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
1959			  true);
1960		return -1;
1961	}
1962	return 0;
1963}
1964
1965/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
1966static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
1967{
1968	struct {
1969		dma_addr_t		input_dma;
1970		struct xhci_segment	*result_seg;
1971	} simple_test_vector [] = {
1972		/* A zeroed DMA field should fail */
1973		{ 0, NULL },
1974		/* One TRB before the ring start should fail */
1975		{ xhci->event_ring->first_seg->dma - 16, NULL },
1976		/* One byte before the ring start should fail */
1977		{ xhci->event_ring->first_seg->dma - 1, NULL },
1978		/* Starting TRB should succeed */
1979		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1980		/* Ending TRB should succeed */
1981		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1982			xhci->event_ring->first_seg },
1983		/* One byte after the ring end should fail */
1984		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1985		/* One TRB after the ring end should fail */
1986		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1987		/* An address of all ones should fail */
1988		{ (dma_addr_t) (~0), NULL },
1989	};
1990	struct {
1991		struct xhci_segment	*input_seg;
1992		union xhci_trb		*start_trb;
1993		union xhci_trb		*end_trb;
1994		dma_addr_t		input_dma;
1995		struct xhci_segment	*result_seg;
1996	} complex_test_vector [] = {
1997		/* Test feeding a valid DMA address from a different ring */
1998		{	.input_seg = xhci->event_ring->first_seg,
1999			.start_trb = xhci->event_ring->first_seg->trbs,
2000			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2001			.input_dma = xhci->cmd_ring->first_seg->dma,
2002			.result_seg = NULL,
2003		},
2004		/* Test feeding a valid end TRB from a different ring */
2005		{	.input_seg = xhci->event_ring->first_seg,
2006			.start_trb = xhci->event_ring->first_seg->trbs,
2007			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2008			.input_dma = xhci->cmd_ring->first_seg->dma,
2009			.result_seg = NULL,
2010		},
2011		/* Test feeding a valid start and end TRB from a different ring */
2012		{	.input_seg = xhci->event_ring->first_seg,
2013			.start_trb = xhci->cmd_ring->first_seg->trbs,
2014			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2015			.input_dma = xhci->cmd_ring->first_seg->dma,
2016			.result_seg = NULL,
2017		},
2018		/* TRB in this ring, but after this TD */
2019		{	.input_seg = xhci->event_ring->first_seg,
2020			.start_trb = &xhci->event_ring->first_seg->trbs[0],
2021			.end_trb = &xhci->event_ring->first_seg->trbs[3],
2022			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
2023			.result_seg = NULL,
2024		},
2025		/* TRB in this ring, but before this TD */
2026		{	.input_seg = xhci->event_ring->first_seg,
2027			.start_trb = &xhci->event_ring->first_seg->trbs[3],
2028			.end_trb = &xhci->event_ring->first_seg->trbs[6],
2029			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
2030			.result_seg = NULL,
2031		},
2032		/* TRB in this ring, but after this wrapped TD */
2033		{	.input_seg = xhci->event_ring->first_seg,
2034			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2035			.end_trb = &xhci->event_ring->first_seg->trbs[1],
2036			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
2037			.result_seg = NULL,
2038		},
2039		/* TRB in this ring, but before this wrapped TD */
2040		{	.input_seg = xhci->event_ring->first_seg,
2041			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2042			.end_trb = &xhci->event_ring->first_seg->trbs[1],
2043			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
2044			.result_seg = NULL,
2045		},
2046		/* TRB not in this ring, and we have a wrapped TD */
2047		{	.input_seg = xhci->event_ring->first_seg,
2048			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2049			.end_trb = &xhci->event_ring->first_seg->trbs[1],
2050			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
2051			.result_seg = NULL,
2052		},
2053	};
2054
2055	unsigned int num_tests;
2056	int i, ret;
2057
2058	num_tests = ARRAY_SIZE(simple_test_vector);
2059	for (i = 0; i < num_tests; i++) {
2060		ret = xhci_test_trb_in_td(xhci,
2061				xhci->event_ring->first_seg,
2062				xhci->event_ring->first_seg->trbs,
2063				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2064				simple_test_vector[i].input_dma,
2065				simple_test_vector[i].result_seg,
2066				"Simple", i);
2067		if (ret < 0)
2068			return ret;
2069	}
2070
2071	num_tests = ARRAY_SIZE(complex_test_vector);
2072	for (i = 0; i < num_tests; i++) {
2073		ret = xhci_test_trb_in_td(xhci,
2074				complex_test_vector[i].input_seg,
2075				complex_test_vector[i].start_trb,
2076				complex_test_vector[i].end_trb,
2077				complex_test_vector[i].input_dma,
2078				complex_test_vector[i].result_seg,
2079				"Complex", i);
2080		if (ret < 0)
2081			return ret;
2082	}
2083	xhci_dbg(xhci, "TRB math tests passed.\n");
2084	return 0;
2085}
2086
2087static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
2088{
2089	u64 temp;
2090	dma_addr_t deq;
2091
2092	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2093			xhci->event_ring->dequeue);
2094	if (deq == 0 && !in_interrupt())
2095		xhci_warn(xhci, "WARN something wrong with SW event ring "
2096				"dequeue ptr.\n");
2097	/* Update HC event ring dequeue pointer */
2098	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2099	temp &= ERST_PTR_MASK;
2100	/* Don't clear the EHB bit (which is RW1C) because
2101	 * there might be more events to service.
2102	 */
2103	temp &= ~ERST_EHB;
2104	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2105			"// Write event ring dequeue pointer, "
2106			"preserving EHB bit");
2107	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
2108			&xhci->ir_set->erst_dequeue);
2109}
2110
2111static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2112		__le32 __iomem *addr, int max_caps)
2113{
2114	u32 temp, port_offset, port_count;
2115	int i;
2116	u8 major_revision, minor_revision;
2117	struct xhci_hub *rhub;
2118	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2119
2120	temp = readl(addr);
2121	major_revision = XHCI_EXT_PORT_MAJOR(temp);
2122	minor_revision = XHCI_EXT_PORT_MINOR(temp);
2123
2124	if (major_revision == 0x03) {
2125		rhub = &xhci->usb3_rhub;
		/*
		 * Some hosts incorrectly use sub-minor version for minor
		 * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and
		 * 0x01 for bcdUSB 0x310). Since there is no USB release with
		 * sub minor version 0x301 to 0x309, we can assume that they
		 * are incorrect and fix it like this.
		 */
		if (minor_revision > 0x00 && minor_revision < 0x10)
			minor_revision <<= 4;
2126	} else if (major_revision <= 0x02) {
2127		rhub = &xhci->usb2_rhub;
2128	} else {
2129		xhci_warn(xhci, "Ignoring unknown port speed, "
2130				"Ext Cap %p, revision = 0x%x\n",
2131				addr, major_revision);
2132		/* Ignoring port protocol we can't understand. FIXME */
2133		return;
2134	}
2135	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
2136
2137	if (rhub->min_rev < minor_revision)
2138		rhub->min_rev = minor_revision;
2139
2140	/* Port offset and count in the third dword, see section 7.2 */
2141	temp = readl(addr + 2);
2142	port_offset = XHCI_EXT_PORT_OFF(temp);
2143	port_count = XHCI_EXT_PORT_COUNT(temp);
2144	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2145			"Ext Cap %p, port offset = %u, "
2146			"count = %u, revision = 0x%x",
2147			addr, port_offset, port_count, major_revision);
2148	/* Port count includes the current port offset */
2149	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
2150		/* WTF? "Valid values are ‘1’ to MaxPorts" */
2151		return;
2152
2153	rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
2154	if (rhub->psi_count) {
2155		rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
2156				    GFP_KERNEL, dev_to_node(dev));
2157		if (!rhub->psi)
2158			rhub->psi_count = 0;
2159
2160		rhub->psi_uid_count++;
2161		for (i = 0; i < rhub->psi_count; i++) {
2162			rhub->psi[i] = readl(addr + 4 + i);
2163
2164			/* count unique ID values, two consecutive entries can
2165			 * have the same ID if the link is asymmetric
2166			 */
2167			if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
2168				  XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
2169				rhub->psi_uid_count++;
2170
2171			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
2172				  XHCI_EXT_PORT_PSIV(rhub->psi[i]),
2173				  XHCI_EXT_PORT_PSIE(rhub->psi[i]),
2174				  XHCI_EXT_PORT_PLT(rhub->psi[i]),
2175				  XHCI_EXT_PORT_PFD(rhub->psi[i]),
2176				  XHCI_EXT_PORT_LP(rhub->psi[i]),
2177				  XHCI_EXT_PORT_PSIM(rhub->psi[i]));
2178		}
2179	}
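	/*
	 * Note: per xHCI spec section 7.2.1, each PSI dword packs PSIV
	 * (bits 3:0, the ID the PORTSC speed field reports), PSIE
	 * (bits 5:4, bit-rate exponent), PLT, PFD, LP and PSIM
	 * (bits 31:16, rate mantissa); e.g. PSIV=4, PSIE=3, PSIM=5
	 * describes a 5 Gb/s speed ID.
	 */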
2180	/* cache usb2 port capabilities */
2181	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
2182		xhci->ext_caps[xhci->num_ext_caps++] = temp;
2183
2184	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
2185		 (temp & XHCI_HLC)) {
2186		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2187			       "xHCI 1.0: support USB2 hardware lpm");
2188		xhci->hw_lpm_support = 1;
2189	}
2190
2191	port_offset--;
2192	for (i = port_offset; i < (port_offset + port_count); i++) {
2193		struct xhci_port *hw_port = &xhci->hw_ports[i];
2194		/* Duplicate entry.  Ignore the port if the revisions differ. */
2195		if (hw_port->rhub) {
2196			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
2197					" port %u\n", addr, i);
2198			xhci_warn(xhci, "Port was marked as USB %u, "
2199					"duplicated as USB %u\n",
2200					hw_port->rhub->maj_rev, major_revision);
2201			/* Only adjust the roothub port counts if we haven't
2202			 * found a similar duplicate.
2203			 */
2204			if (hw_port->rhub != rhub &&
2205				 hw_port->hcd_portnum != DUPLICATE_ENTRY) {
2206				hw_port->rhub->num_ports--;
2207				hw_port->hcd_portnum = DUPLICATE_ENTRY;
2208			}
2209			continue;
2210		}
2211		hw_port->rhub = rhub;
2212		rhub->num_ports++;
2213	}
2214	/* FIXME: Should we disable ports not in the Extended Capabilities? */
2215}
2216
2217static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
2218					struct xhci_hub *rhub, gfp_t flags)
2219{
2220	int port_index = 0;
2221	int i;
2222	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2223
2224	if (!rhub->num_ports)
2225		return;
2226	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports), flags,
2227			dev_to_node(dev));
	if (!rhub->ports)
		return;
2228	for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
2229		if (xhci->hw_ports[i].rhub != rhub ||
2230		    xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
2231			continue;
2232		xhci->hw_ports[i].hcd_portnum = port_index;
2233		rhub->ports[port_index] = &xhci->hw_ports[i];
2234		port_index++;
2235		if (port_index == rhub->num_ports)
2236			break;
2237	}
2238}
2239
2240/*
2241 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
2242 * specify what speeds each port is supposed to be.  We can't count on the port
2243 * speed bits in the PORTSC register being correct until a device is connected,
2244 * but we need to set up the two fake roothubs with the correct number of USB
2245 * 3.0 and USB 2.0 ports at host controller initialization time.
2246 */
2247static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2248{
2249	void __iomem *base;
2250	u32 offset;
2251	unsigned int num_ports;
2252	int i, j;
2253	int cap_count = 0;
2254	u32 cap_start;
2255	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2256
2257	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2258	xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
2259				flags, dev_to_node(dev));
2260	if (!xhci->hw_ports)
2261		return -ENOMEM;
2262
2263	for (i = 0; i < num_ports; i++) {
2264		xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
2265			NUM_PORT_REGS * i;
2266		xhci->hw_ports[i].hw_portnum = i;
2267	}
2268
2269	xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
2270				   dev_to_node(dev));
2271	if (!xhci->rh_bw)
2272		return -ENOMEM;
2273	for (i = 0; i < num_ports; i++) {
2274		struct xhci_interval_bw_table *bw_table;
2275
2276		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
2277		bw_table = &xhci->rh_bw[i].bw_table;
2278		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2279			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2280	}
2281	base = &xhci->cap_regs->hc_capbase;
2282
2283	cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
2284	if (!cap_start) {
2285		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
2286		return -ENODEV;
2287	}
2288
2289	offset = cap_start;
2290	/* count extended protocol capability entries for later caching */
2291	while (offset) {
2292		cap_count++;
2293		offset = xhci_find_next_ext_cap(base, offset,
2294						      XHCI_EXT_CAPS_PROTOCOL);
2295	}
2296
2297	xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
2298				flags, dev_to_node(dev));
2299	if (!xhci->ext_caps)
2300		return -ENOMEM;
2301
2302	offset = cap_start;
2303
2304	while (offset) {
2305		xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
2306		if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
2307		    num_ports)
2308			break;
2309		offset = xhci_find_next_ext_cap(base, offset,
2310						XHCI_EXT_CAPS_PROTOCOL);
2311	}
2312	if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
2313		xhci_warn(xhci, "No ports on the roothubs?\n");
2314		return -ENODEV;
2315	}
2316	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2317		       "Found %u USB 2.0 ports and %u USB 3.0 ports.",
2318		       xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);
2319
2320	/* Place limits on the number of roothub ports so that the hub
2321	 * descriptors aren't longer than the USB core will allocate.
2322	 */
2323	if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
2324		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2325				"Limiting USB 3.0 roothub ports to %u.",
2326				USB_SS_MAXPORTS);
2327		xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
2328	}
2329	if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
2330		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2331				"Limiting USB 2.0 roothub ports to %u.",
2332				USB_MAXCHILDREN);
2333		xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
2334	}
2335
2336	/*
2337	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
2338	 * Not sure how the USB core will handle a hub with no ports...
2339	 */
2340
2341	xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
2342	xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
2343
2344	return 0;
2345}
2346
2347int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2348{
2349	dma_addr_t	dma;
2350	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
2351	unsigned int	val, val2;
2352	u64		val_64;
2353	u32		page_size, temp;
2354	int		i, ret;
2355
2356	INIT_LIST_HEAD(&xhci->cmd_list);
2357
2358	/* init command timeout work */
2359	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
2360	init_completion(&xhci->cmd_ring_stop_completion);
2361
2362	page_size = readl(&xhci->op_regs->page_size);
2363	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2364			"Supported page size register = 0x%x", page_size);
2365	for (i = 0; i < 16; i++) {
2366		if ((0x1 & page_size) != 0)
2367			break;
2368		page_size = page_size >> 1;
2369	}
2370	if (i < 16)
2371		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2372			"Supported page size of %iK", (1 << (i+12)) / 1024);
2373	else
2374		xhci_warn(xhci, "WARN: no supported page size\n");
2375	/* Use 4K pages, since that's common and the minimum the HC supports */
2376	xhci->page_shift = 12;
2377	xhci->page_size = 1 << xhci->page_shift;
2378	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2379			"HCD page size set to %iK", xhci->page_size / 1024);
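	/*
	 * Worked example: a PAGESIZE register value of 0x1 has only bit 0
	 * set, so the loop above exits with i == 0 and the controller
	 * supports 2^(0 + 12) == 4096 byte pages.
	 */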
2380
2381	/*
2382	 * Program the Number of Device Slots Enabled field in the CONFIG
2383	 * register with the max value of slots the HC can handle.
2384	 */
2385	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
2386	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2387			"// xHC can handle at most %d device slots.", val);
2388	val2 = readl(&xhci->op_regs->config_reg);
2389	val |= (val2 & ~HCS_SLOTS_MASK);
2390	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2391			"// Setting Max device slots reg = 0x%x.", val);
2392	writel(val, &xhci->op_regs->config_reg);
2393
2394	/*
2395	 * xHCI section 5.4.6 - device context base address array must be
2396	 * "physically contiguous and 64-byte (cache line) aligned".
2397	 */
2398	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
2399			flags);
2400	if (!xhci->dcbaa)
2401		goto fail;
2402	xhci->dcbaa->dma = dma;
2403	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2404			"// Device context base array address = 0x%llx (DMA), %p (virt)",
2405			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
2406	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
2407
2408	/*
2409	 * Initialize the ring segment pool.  The ring must be a contiguous
2410	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
2411	 * however, the command ring segment needs 64-byte aligned segments
2412	 * and our use of dma addresses in the trb_address_map radix tree needs
2413	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
2414	 */
2415	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
2416			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
2417
2418	/* See Table 46 and Note on Figure 55 */
2419	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2420			2112, 64, xhci->page_size);
2421	if (!xhci->segment_pool || !xhci->device_pool)
2422		goto fail;
2423
2424	/* Linear stream context arrays don't have any boundary restrictions,
2425	 * and only need to be 16-byte aligned.
2426	 */
2427	xhci->small_streams_pool =
2428		dma_pool_create("xHCI 256 byte stream ctx arrays",
2429			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
2430	xhci->medium_streams_pool =
2431		dma_pool_create("xHCI 1KB stream ctx arrays",
2432			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
2433	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
2434	 * will be allocated with dma_alloc_coherent()
2435	 */
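	/*
	 * Sizing note: stream contexts are 16 bytes each, so the 256-byte
	 * pool covers arrays of up to 16 stream contexts and the 1KB pool
	 * up to 64; anything larger falls back to dma_alloc_coherent() as
	 * noted above.
	 */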
2436
2437	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
2438		goto fail;
2439
2440	/* Set up the command ring to have one segment for now. */
2441	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
2442	if (!xhci->cmd_ring)
2443		goto fail;
2444	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2445			"Allocated command ring at %p", xhci->cmd_ring);
2446	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
2447			(unsigned long long)xhci->cmd_ring->first_seg->dma);
2448
2449	/* Set the address in the Command Ring Control register */
2450	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2451	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2452		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2453		xhci->cmd_ring->cycle_state;
2454	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2455			"// Setting command ring address to 0x%016llx", val_64);
2456	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
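    	/*
    	 * CRCR layout, per section 5.4.5: bit 0 is the Ring Cycle State
    	 * the xHC uses as its initial consumer cycle bit, bits 5:1 are
    	 * control/status bits (stop, abort, running), and bits 63:6 hold
    	 * the 64-byte-aligned command ring dequeue pointer.
    	 */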
2457
2458	xhci->lpm_command = xhci_alloc_command_with_ctx(xhci, true, flags);
2459	if (!xhci->lpm_command)
2460		goto fail;
2461
2462	/* Reserve one command ring TRB for disabling LPM.
2463	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
2464	 * disabling LPM, we only need to reserve one TRB for all devices.
2465	 */
2466	xhci->cmd_ring_reserved_trbs++;
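    	/*
    	 * The reservation is honored in prepare_ring(): ordinary commands
    	 * leave cmd_ring_reserved_trbs slots free, while commands queued
    	 * with command_must_succeed set may use the reserved space.
    	 */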
2467
2468	val = readl(&xhci->cap_regs->db_off);
2469	val &= DBOFF_MASK;
2470	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2471			"// Doorbell array is located at offset 0x%x"
2472			" from cap regs base addr", val);
2473	xhci->dba = (void __iomem *) xhci->cap_regs + val;
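    	/*
    	 * The DBOFF capability register (section 5.3.7) holds the offset
    	 * of the doorbell array; its low two bits are reserved, hence
    	 * DBOFF_MASK.  Doorbell 0 rings the command ring for the host
    	 * controller, doorbells 1..MaxSlots belong to device slots
    	 * (section 5.6).
    	 */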
2474	/* Set ir_set to interrupt register set 0 */
2475	xhci->ir_set = &xhci->run_regs->ir_set[0];
2476
2477	/*
2478	 * Event ring setup: Allocate a normal ring, but also set up
2479	 * the event ring segment table (ERST).  Section 4.9.3.
2480	 */
2481	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
2482	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
2483					0, flags);
2484	if (!xhci->event_ring)
2485		goto fail;
2486	if (xhci_check_trb_in_td_math(xhci) < 0)
2487		goto fail;
2488
2489	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
2490	if (ret)
2491		goto fail;
2492
2493	/* Set the ERST size to the number of entries in the segment table */
2494	val = readl(&xhci->ir_set->erst_size);
2495	val &= ERST_SIZE_MASK;
2496	val |= ERST_NUM_SEGS;
2497	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2498			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
2499			val);
2500	writel(val, &xhci->ir_set->erst_size);
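    	/*
    	 * ERST_SIZE_MASK covers the reserved upper bits of ERSTSZ, so the
    	 * read-modify-write above only updates the low 16-bit ERST Size
    	 * field (section 5.5.2.3.1) with the number of table entries.
    	 */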
2501
2502	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2503			"// Set ERST entries to point to event ring.");
2504	/* set the segment table base address */
2505	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2506			"// Set ERST base address for ir_set 0 = 0x%llx",
2507			(unsigned long long)xhci->erst.erst_dma_addr);
2508	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2509	val_64 &= ERST_PTR_MASK;
2510	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
2511	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
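    	/*
    	 * Section 5.5.2.3.2: the ERST base address must be 64-byte
    	 * aligned.  The reserved low bits of ERSTBA are preserved and the
    	 * table's DMA address is OR'ed into the pointer field.
    	 */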
2512
2513	/* Set the event ring dequeue address */
2514	xhci_set_hc_event_deq(xhci);
2515	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2516			"Wrote ERST address to ir_set 0.");
2517
2518	/*
2519	 * XXX: Might need to set the Interrupter Moderation Register to
2520	 * something other than the default (~1ms minimum between interrupts).
2521	 * See section 5.5.1.2.
2522	 */
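    	/*
    	 * The default Interrupter Moderation Interval is 4000, in 250 ns
    	 * units, i.e. at most one interrupt per millisecond out of reset.
    	 */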
2523	for (i = 0; i < MAX_HC_SLOTS; i++)
2524		xhci->devs[i] = NULL;
2525	for (i = 0; i < USB_MAXCHILDREN; i++) {
2526		xhci->usb2_rhub.bus_state.resume_done[i] = 0;
2527		xhci->usb3_rhub.bus_state.resume_done[i] = 0;
2528		/* Only the USB 2.0 completions will ever be used. */
2529		init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
2530	}
2531
2532	if (scratchpad_alloc(xhci, flags))
2533		goto fail;
2534	if (xhci_setup_port_arrays(xhci, flags))
2535		goto fail;
2536
2537	/* Enable USB 3.0 device notifications for function remote wake, which
2538	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
2539	 * U3 (device suspend).
2540	 */
2541	temp = readl(&xhci->op_regs->dev_notification);
2542	temp &= ~DEV_NOTE_MASK;
2543	temp |= DEV_NOTE_FWAKE;
2544	writel(temp, &xhci->op_regs->dev_notification);
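    	/*
    	 * DNCTRL, per section 5.4.4: setting bit N lets the xHC generate
    	 * a Device Notification Event for notification type N.  Type 1 is
    	 * FUNCTION_WAKE (DEV_NOTE_FWAKE); all other notification types
    	 * stay masked.
    	 */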
2545
2546	return 0;
2547
2548fail:
2549	xhci_halt(xhci);
2550	xhci_reset(xhci);
2551	xhci_mem_cleanup(xhci);
2552	return -ENOMEM;
2553}