/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>

#include "xhci.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (!seg)
		return;
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long)seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		if (xhci_link_trb_quirk(xhci))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long)prev->dma,
			(unsigned long long)next->dma);
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring || !ring->first_seg)
		return;
	first_seg = ring->first_seg;
	seg = first_seg->next;
	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first_seg);
	ring->first_seg = NULL;
	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0.  The producer must write 1 to the
	 * cycle bit to hand over ownership of the TRB, so PCS = 1.  The
	 * consumer must compare CCS to the cycle bit to check ownership,
	 * so CCS = 1.
	 */
	ring->cycle_state = 1;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;
}
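
/*
 * Illustrative sketch (not used by the driver): the consumer-side ownership
 * test that the CCS comment above describes.  A TRB belongs to the consumer
 * only while its cycle bit matches the ring's cycle state; the helper name
 * below is hypothetical and exists only to demonstrate the handshake.
 */
static inline bool xhci_example_trb_owned(union xhci_trb *trb,
		struct xhci_ring *ring)
{
	/* Cycle bit is bit 0 of the last dword of every TRB */
	return (le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) ==
			ring->cycle_state;
}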

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = kzalloc(sizeof(*ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	INIT_LIST_HEAD(&ring->td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	xhci_initialize_ring_info(ring);
	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return NULL;
}
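
/*
 * Usage sketch (illustrative): endpoint and stream rings in this file are
 * allocated as a single segment linked back to itself,
 *
 *	ring = xhci_ring_alloc(xhci, 1, true, GFP_KERNEL);
 *
 * while the event ring is allocated elsewhere with link_trbs == false,
 * since the xHC wraps the event ring through the ERST rather than through
 * link TRBs.
 */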

void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}

/* Zero an endpoint ring (except for link TRBs) and move the enqueue and
 * dequeue pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
		struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, true);
		seg = seg->next;
	} while (seg != ring->first_seg);
	xhci_initialize_ring_info(ring);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}

#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
		int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);

	if (!ctx)
		return NULL;

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	/* Don't dereference a failed pool allocation */
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}

static void xhci_free_container_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx,
		unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
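
/*
 * Layout sketch (illustrative only): a device context is laid out as
 * [slot][ep0][ep1 OUT][ep1 IN]...[ep15 IN]; an input context prepends the
 * input control context, shifting every entry down by one.  The hypothetical
 * helper below just restates the offset math used by xhci_get_ep_ctx().
 */
static inline size_t xhci_example_ep_ctx_offset(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx, unsigned int ep_index)
{
	unsigned int entry = ep_index + 1;	/* skip the slot context */

	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		entry++;			/* skip the input control context */
	return entry * CTX_SIZE(xhci->hcc_params);
}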


/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		pci_free_consistent(pdev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				stream_ctx, dma);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		return pci_alloc_consistent(pdev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				dma);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> SEGMENT_SHIFT);
	return ep->ring;
}

/* Only use this when you know stream_info is valid */
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static struct xhci_ring *dma_to_stream_ring(
		struct xhci_stream_info *stream_info,
		u64 address)
{
	return radix_tree_lookup(&stream_info->trb_address_map,
			address >> SEGMENT_SHIFT);
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	/* Valid stream IDs run from 1 to num_streams - 1 */
	if (stream_id >= ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}
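
/*
 * Usage sketch (hypothetical caller): on a transfer event for an endpoint
 * with streams, translate the event's stream ID back to the ring that the
 * TD was queued on, treating an out-of-range ID as a hardware bug.
 *
 *	ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
 *	if (!ring)
 *		xhci_warn(xhci, "Event for disabled stream ID %u\n",
 *				stream_id);
 */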

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static int xhci_test_radix_tree(struct xhci_hcd *xhci,
		unsigned int num_streams,
		struct xhci_stream_info *stream_info)
{
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;

	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		struct xhci_ring *mapped_ring;
		int trb_size = sizeof(union xhci_trb);

		cur_ring = stream_info->stream_rings[cur_stream];
		for (addr = cur_ring->first_seg->dma;
				addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
				addr += trb_size) {
			mapped_ring = dma_to_stream_ring(stream_info, addr);
			if (cur_ring != mapped_ring) {
				xhci_warn(xhci, "WARN: DMA address 0x%08llx "
						"didn't map to stream ID %u; "
						"mapped to ring %p\n",
						(unsigned long long) addr,
						cur_stream,
						mapped_ring);
				return -EINVAL;
			}
		}
		/* One TRB after the end of the ring segment shouldn't return a
		 * pointer to the current ring (although it may be a part of a
		 * different ring).
		 */
		mapped_ring = dma_to_stream_ring(stream_info, addr);
		if (mapped_ring != cur_ring) {
			/* One TRB before should also fail */
			addr = cur_ring->first_seg->dma - trb_size;
			mapped_ring = dma_to_stream_ring(stream_info, addr);
		}
		if (mapped_ring == cur_ring) {
			xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
					"mapped to valid stream ID %u; "
					"mapped ring = %p\n",
					(unsigned long long) addr,
					cur_stream,
					mapped_ring);
			return -EINVAL;
		}
	}
	return 0;
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */

/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger
 * than the number of streams the driver wants to use.  This is because the
 * number of stream context array entries must be a power of two.
 *
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to.  We need to do this because the host controller won't
 * tell us which stream ring the TRB came from.  We could store the stream ID
 * in an event data TRB, but that doesn't help us for the cancellation case,
 * since the endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses.  For example,
 * say I have segments of size 1KB, that are always 64-byte aligned.  A
 * segment may start at 0x10c91000 and end at 0x10c913f0.  If I shift off
 * the lower 10 bits, the key for that segment is 0x43244.  I can use the
 * DMA address of the TRB to pass the radix tree a key to get the right
 * stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key.  On 32-bit systems, an
 * unsigned long will be 32 bits; on a 64-bit system, an unsigned long will
 * be 64 bits.  Since we only request 32-bit DMA addresses, we can use that
 * as the key on 32-bit or 64-bit systems (it would also be fine if we asked
 * for 64-bit PCI DMA addresses on a 64-bit system).  There might be a
 * problem on 32-bit extended systems (where the DMA address can be bigger
 * than 32 bits), if we allow the PCI DMA mask to be bigger than 32 bits.
 * So don't do that.
 */
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	unsigned long key;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u "
			"stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx_array;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 1, true, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		key = (unsigned long)
			(cur_ring->first_seg->dma >> SEGMENT_SHIFT);
		ret = radix_tree_insert(&stream_info->trb_address_map,
				key, cur_ring);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero.  This will cause the xHC to give us an
	 * error if the device asks for a stream ID we don't have setup (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */
#if XHCI_DEBUG
	/* Do a little test on the radix tree to make sure it returns the
	 * correct values.
	 */
	if (xhci_test_radix_tree(xhci, num_streams, stream_info))
		goto cleanup_rings;
#endif

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx_array:
	/* Don't leak the stream context array on the error paths */
	xhci_free_stream_ctx(xhci, num_stream_ctxs,
			stream_info->stream_ctx_array,
			stream_info->ctx_array_dma);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
			| EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
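
/*
 * Worked example (illustrative): for num_stream_ctxs == 256, fls(256) == 9,
 * so MaxPStreams == 7 and the array size the xHC decodes from the endpoint
 * context is 2^(7 + 1) == 256 entries, matching what was allocated.
 */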

/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}

/* Frees all stream contexts associated with the endpoint.
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;
	dma_addr_t addr;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}


/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}

/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;

	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
	}

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);
	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_ring *ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}

/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.  xhci->port_array provides an array of the port speed for
 * each offset into the port status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All
 * we know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Scan through the xHCI roothub port array, looking for the Nth
 * entry of the correct port speed.  Return the port number of that entry.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	unsigned int num_similar_speed_ports;
	unsigned int faked_port_num;
	int i;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	faked_port_num = top_dev->portnum;
	for (i = 0, num_similar_speed_ports = 0;
			i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER))
			num_similar_speed_ports++;
		if (num_similar_speed_ports == faked_port_num)
			/* Roothub ports are numbered from 1 to N */
			return i+1;
	}
	return 0;
}
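
/*
 * Worked example (hypothetical port_array): with port_array = {2, 3, 2, 3},
 * a SuperSpeed device whose per-roothub ("faked") port number is 2 matches
 * the second 0x03 entry, so the real port number is 4; a high-speed device
 * on faked port 2 matches the second 0x02 entry, real port 3.
 */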

/* Set up an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 port_num;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 2) New slot context and endpoint 0 context are valid */
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->port = top_dev->portnum;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->port);

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
				(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
	/*
	 * XXX: Not sure about wireless USB devices.
	 */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
		break;
	case USB_SPEED_HIGH:
		/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* New speed? */
		BUG();
	}
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
				"ep %#x - rounding interval to %d %sframes\n",
				ep->desc.bEndpointAddress,
				1 << interval,
				udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes.  We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}
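
/*
 * Worked example (illustrative): a high-speed interrupt endpoint with
 * bInterval == 5 yields interval == 4, i.e. service every 2^4 * 125us = 2ms.
 * A full-speed isoc endpoint with bInterval == 4 yields 3 + 3 == 6, i.e.
 * 2^6 * 125us = 8ms, matching the 2^(4-1) == 8 frame encoding.
 */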

/*
 * Convert bInterval expressed in frames (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = fls(8 * ep->desc.bInterval) - 1;
	interval = clamp_val(interval, 3, 10);
	if ((1 << interval) != 8 * ep->desc.bInterval)
		dev_warn(&udev->dev,
				"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
				ep->desc.bEndpointAddress,
				1 << interval,
				8 * ep->desc.bInterval);

	return interval;
}
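
/*
 * Worked example (illustrative): a low-speed interrupt endpoint with
 * bInterval == 10 frames gives fls(80) - 1 == 6, clamped to [3, 10], i.e.
 * 2^6 microframes = 8ms, rounded down from the requested 10ms.
 */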

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval
 * field is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = ep->desc.bInterval;
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}

/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER ||
	    !usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}

/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
	    usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
	max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize) & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}
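
/*
 * Worked example (illustrative): a high-speed isoc endpoint with
 * wMaxPacketSize == 0x1400 encodes 1024-byte packets (bits 10:0) with two
 * additional transactions per microframe (bits 12:11), so the max ESIT
 * payload is 1024 * (2 + 1) = 3072 bytes.
 */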

/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;
	u32 max_esit_payload;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	/* Set up the endpoint ring */
	/*
	 * Isochronous endpoint ring needs bigger size because one isoc URB
	 * carries multiple packets and it will insert multiple tds to the
	 * ring.
	 * This should be replaced with dynamic ring resizing in the future.
	 */
	if (usb_endpoint_xfer_isoc(&ep->desc))
		virt_dev->eps[ep_index].new_ring =
			xhci_ring_alloc(xhci, 8, true, mem_flags);
	else
		virt_dev->eps[ep_index].new_ring =
			xhci_ring_alloc(xhci, 1, true, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		virt_dev->num_rings_cached--;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
	}
	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);

	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
				      | EP_MULT(xhci_get_endpoint_mult(udev, ep)));

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * CErr shall be set to 0 for Isoch endpoints.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
	else
		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0));

	ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));

	/* Set the max packet size and max burst */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		max_packet = le16_to_cpu(ep->desc.wMaxPacketSize);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
		/* dig out max burst from ep companion desc */
		max_burst = ep->ss_ep_comp.bMaxBurst;
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
		break;
	case USB_SPEED_HIGH:
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
		    usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize)
					& 0x1800) >> 11;
			ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
		}
		/* Fall through */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
		break;
	default:
		BUG();
	}
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter gather
	 * list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, we set it to the max
	 * available, until we have new API in the USB core to allow drivers to
	 * declare how much bandwidth they actually need.
	 *
	 * Normally, it would be calculated by taking the total of the buffer
	 * lengths in the TD and then dividing by the number of TRBs in a TD,
	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
	 * use Event Data TRBs, and we don't chain in a link TRB on short
	 * transfers, we're basically dividing by 1.
	 *
	 * The xHCI 1.0 specification indicates that the Average TRB Length
	 * should be set to 8 for control endpoints.
	 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
	else
		ep_ctx->tx_info |=
			cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));

	/* FIXME Debug endpoint context */
	return 0;
}

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}

/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.  Only the context entries field
 * matters, but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array =
		pci_alloc_consistent(to_pci_dev(dev),
				num_sp * sizeof(u64),
				&xhci->scratchpad->sp_dma);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = pci_alloc_consistent(to_pci_dev(dev),
				xhci->page_size, &dma);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

 fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		pci_free_consistent(to_pci_dev(dev), xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		pci_free_consistent(pdev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	pci_free_consistent(pdev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
					mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}
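
/*
 * Usage sketch (hypothetical caller): allocate a command with its own input
 * context and completion, queue it, then wait and free.
 *
 *	command = xhci_alloc_command(xhci, true, true, GFP_KERNEL);
 *	if (!command)
 *		return -ENOMEM;
 *	... queue the command TRB and ring the host controller doorbell ...
 *	wait_for_completion(command->completion);
 *	xhci_free_command(xhci, command);
 */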

void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
	int last;

	if (!urb_priv)
		return;

	last = urb_priv->length - 1;
	if (last >= 0) {
		int i;
		for (i = 0; i <= last; i++)
			kfree(urb_priv->td[i]);
	}
	kfree(urb_priv);
}

void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	if (xhci->ir_set) {
		xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
		xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
		xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
	}
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	if (xhci->small_streams_pool)
		dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg(xhci, "Freed small stream array pool\n");

	if (xhci->medium_streams_pool)
		dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg(xhci, "Freed medium stream array pool\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	if (xhci->dcbaa)
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	xhci->num_usb2_ports = 0;
	xhci->num_usb3_ports = 0;
	kfree(xhci->usb2_ports);
	kfree(xhci->usb3_ports);
	kfree(xhci->port_array);

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->bus_state[0].bus_suspended = 0;
	xhci->bus_state[1].bus_suspended = 0;
}

static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}
1594
1595/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
1596static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
1597{
1598 struct {
1599 dma_addr_t input_dma;
1600 struct xhci_segment *result_seg;
1601 } simple_test_vector [] = {
1602 /* A zeroed DMA field should fail */
1603 { 0, NULL },
1604 /* One TRB before the ring start should fail */
1605 { xhci->event_ring->first_seg->dma - 16, NULL },
1606 /* One byte before the ring start should fail */
1607 { xhci->event_ring->first_seg->dma - 1, NULL },
1608 /* Starting TRB should succeed */
1609 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1610 /* Ending TRB should succeed */
1611 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1612 xhci->event_ring->first_seg },
1613 /* One byte after the ring end should fail */
1614 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1615 /* One TRB after the ring end should fail */
1616 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1617 /* An address of all ones should fail */
1618 { (dma_addr_t) (~0), NULL },
1619 };
1620 struct {
1621 struct xhci_segment *input_seg;
1622 union xhci_trb *start_trb;
1623 union xhci_trb *end_trb;
1624 dma_addr_t input_dma;
1625 struct xhci_segment *result_seg;
1626 } complex_test_vector [] = {
1627 /* Test feeding a valid DMA address from a different ring */
1628 { .input_seg = xhci->event_ring->first_seg,
1629 .start_trb = xhci->event_ring->first_seg->trbs,
1630 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1631 .input_dma = xhci->cmd_ring->first_seg->dma,
1632 .result_seg = NULL,
1633 },
1634 /* Test feeding a valid end TRB from a different ring */
1635 { .input_seg = xhci->event_ring->first_seg,
1636 .start_trb = xhci->event_ring->first_seg->trbs,
1637 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1638 .input_dma = xhci->cmd_ring->first_seg->dma,
1639 .result_seg = NULL,
1640 },
1641 /* Test feeding a valid start and end TRB from a different ring */
1642 { .input_seg = xhci->event_ring->first_seg,
1643 .start_trb = xhci->cmd_ring->first_seg->trbs,
1644 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1645 .input_dma = xhci->cmd_ring->first_seg->dma,
1646 .result_seg = NULL,
1647 },
1648 /* TRB in this ring, but after this TD */
1649 { .input_seg = xhci->event_ring->first_seg,
1650 .start_trb = &xhci->event_ring->first_seg->trbs[0],
1651 .end_trb = &xhci->event_ring->first_seg->trbs[3],
1652 .input_dma = xhci->event_ring->first_seg->dma + 4*16,
1653 .result_seg = NULL,
1654 },
1655 /* TRB in this ring, but before this TD */
1656 { .input_seg = xhci->event_ring->first_seg,
1657 .start_trb = &xhci->event_ring->first_seg->trbs[3],
1658 .end_trb = &xhci->event_ring->first_seg->trbs[6],
1659 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1660 .result_seg = NULL,
1661 },
1662 /* TRB in this ring, but after this wrapped TD */
1663 { .input_seg = xhci->event_ring->first_seg,
1664 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1665 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1666 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1667 .result_seg = NULL,
1668 },
1669 /* TRB in this ring, but before this wrapped TD */
1670 { .input_seg = xhci->event_ring->first_seg,
1671 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1672 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1673 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
1674 .result_seg = NULL,
1675 },
1676 /* TRB not in this ring, and we have a wrapped TD */
1677 { .input_seg = xhci->event_ring->first_seg,
1678 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1679 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1680 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
1681 .result_seg = NULL,
1682 },
1683 };
1684
1685 unsigned int num_tests;
1686 int i, ret;
1687
1688 num_tests = ARRAY_SIZE(simple_test_vector);
1689 for (i = 0; i < num_tests; i++) {
1690 ret = xhci_test_trb_in_td(xhci,
1691 xhci->event_ring->first_seg,
1692 xhci->event_ring->first_seg->trbs,
1693 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1694 simple_test_vector[i].input_dma,
1695 simple_test_vector[i].result_seg,
1696 "Simple", i);
1697 if (ret < 0)
1698 return ret;
1699 }
1700
1701 num_tests = ARRAY_SIZE(complex_test_vector);
1702 for (i = 0; i < num_tests; i++) {
1703 ret = xhci_test_trb_in_td(xhci,
1704 complex_test_vector[i].input_seg,
1705 complex_test_vector[i].start_trb,
1706 complex_test_vector[i].end_trb,
1707 complex_test_vector[i].input_dma,
1708 complex_test_vector[i].result_seg,
1709 "Complex", i);
1710 if (ret < 0)
1711 return ret;
1712 }
1713 xhci_dbg(xhci, "TRB math tests passed.\n");
1714 return 0;
1715}
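
/*
 * Worked example of the math checked above (illustrative values): with
 * 16-byte TRBs, a TD spanning trbs[0]..trbs[3] covers DMA offsets
 * 0x00..0x3f from the segment base, so an event pointing at
 * base + 4*16 = 0x40 lands one TRB past the TD and must not match.
 */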
1716
1717static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
1718{
1719 u64 temp;
1720 dma_addr_t deq;
1721
1722 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
1723 xhci->event_ring->dequeue);
1724 if (deq == 0 && !in_interrupt())
1725 xhci_warn(xhci, "WARN something wrong with SW event ring "
1726 "dequeue ptr.\n");
1727 /* Update HC event ring dequeue pointer */
1728 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
1729 temp &= ERST_PTR_MASK;
1730 /* Don't clear the EHB bit (which is RW1C) because
1731 * there might be more events to service.
1732 */
1733 temp &= ~ERST_EHB;
1734 xhci_dbg(xhci, "// Write event ring dequeue pointer, "
1735 "preserving EHB bit\n");
1736 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
1737 &xhci->ir_set->erst_dequeue);
1738}
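
/*
 * Illustrative example of the composition above (address made up): if
 * the software dequeue TRB sits at DMA 0x3a201040, the value written
 * back is (0x3a201040 & ~ERST_PTR_MASK) OR'd with the preserved low
 * flag bits, with the EHB bit written as 0 so that this write does not
 * accidentally clear the write-1-to-clear Event Handler Busy flag.
 */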
1739
1740static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
1741 __le32 __iomem *addr, u8 major_revision)
1742{
1743 u32 temp, port_offset, port_count;
1744 int i;
1745
1746 if (major_revision > 0x03) {
1747 xhci_warn(xhci, "Ignoring unknown port speed, "
1748 "Ext Cap %p, revision = 0x%x\n",
1749 addr, major_revision);
1750 /* Ignoring port protocol we can't understand. FIXME */
1751 return;
1752 }
1753
1754 /* Port offset and count in the third dword, see section 7.2 */
1755 temp = xhci_readl(xhci, addr + 2);
1756 port_offset = XHCI_EXT_PORT_OFF(temp);
1757 port_count = XHCI_EXT_PORT_COUNT(temp);
1758 xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
1759 "count = %u, revision = 0x%x\n",
1760 addr, port_offset, port_count, major_revision);
1761 /* Port count includes the current port offset */
1762 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
1763 /* WTF? "Valid values are ‘1’ to MaxPorts" */
1764 return;
1765 port_offset--;
1766 for (i = port_offset; i < (port_offset + port_count); i++) {
1767 /* Duplicate entry. Ignore the port if the revisions differ. */
1768 if (xhci->port_array[i] != 0) {
1769 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
1770 " port %u\n", addr, i);
1771 xhci_warn(xhci, "Port was marked as USB %u, "
1772 "duplicated as USB %u\n",
1773 xhci->port_array[i], major_revision);
1774 /* Only adjust the roothub port counts if we haven't
1775 * found a similar duplicate.
1776 */
1777 if (xhci->port_array[i] != major_revision &&
1778 xhci->port_array[i] != DUPLICATE_ENTRY) {
1779 if (xhci->port_array[i] == 0x03)
1780 xhci->num_usb3_ports--;
1781 else
1782 xhci->num_usb2_ports--;
1783 xhci->port_array[i] = DUPLICATE_ENTRY;
1784 }
1785 /* FIXME: Should we disable the port? */
1786 continue;
1787 }
1788 xhci->port_array[i] = major_revision;
1789 if (major_revision == 0x03)
1790 xhci->num_usb3_ports++;
1791 else
1792 xhci->num_usb2_ports++;
1793 }
1794 /* FIXME: Should we disable ports not in the Extended Capabilities? */
1795}
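
/*
 * Example with hypothetical values: a Supported Protocol capability
 * with major revision 0x03, port_offset = 5 and port_count = 2 marks
 * port_array[4] and port_array[5] as USB 3.0 roothub ports.
 */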
1796
1797/*
1798 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
1799 * specify what speeds each port is supposed to be. We can't count on the port
1800 * speed bits in the PORTSC register being correct until a device is connected,
1801 * but we need to set up the two fake roothubs with the correct number of USB
1802 * 3.0 and USB 2.0 ports at host controller initialization time.
1803 */
1804static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
1805{
1806 __le32 __iomem *addr;
1807 u32 offset;
1808 unsigned int num_ports;
1809 int i, port_index;
1810
1811 addr = &xhci->cap_regs->hcc_params;
1812 offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
1813 if (offset == 0) {
1814 xhci_err(xhci, "No Extended Capability registers, "
1815 "unable to set up roothub.\n");
1816 return -ENODEV;
1817 }
1818
1819 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1820 xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
1821 if (!xhci->port_array)
1822 return -ENOMEM;
1823
1824 /*
1825 * For whatever reason, the first capability offset is from the
1826 * capability register base, not from the HCCPARAMS register.
1827 * See section 5.3.6 for offset calculation.
1828 */
1829 addr = &xhci->cap_regs->hc_capbase + offset;
1830 while (1) {
1831 u32 cap_id;
1832
1833 cap_id = xhci_readl(xhci, addr);
1834 if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
1835 xhci_add_in_port(xhci, num_ports, addr,
1836 (u8) XHCI_EXT_PORT_MAJOR(cap_id));
1837 offset = XHCI_EXT_CAPS_NEXT(cap_id);
1838 if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
1839 == num_ports)
1840 break;
1841 /*
1842 * Once you're into the Extended Capabilities, the offset is
1843 * always relative to the register holding the offset.
1844 */
1845 addr += offset;
1846 }
1847
1848 if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
1849 xhci_warn(xhci, "No ports on the roothubs?\n");
1850 return -ENODEV;
1851 }
1852 xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
1853 xhci->num_usb2_ports, xhci->num_usb3_ports);
1854
1855 /* Place limits on the number of roothub ports so that the hub
1856 * descriptors aren't longer than the USB core will allocate.
1857 */
1858 if (xhci->num_usb3_ports > 15) {
1859 xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
1860 xhci->num_usb3_ports = 15;
1861 }
1862 if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
1863 xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
1864 USB_MAXCHILDREN);
1865 xhci->num_usb2_ports = USB_MAXCHILDREN;
1866 }
1867
1868 /*
1869 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
1870 * Not sure how the USB core will handle a hub with no ports...
1871 */
1872 if (xhci->num_usb2_ports) {
1873 xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
1874 xhci->num_usb2_ports, flags);
1875 if (!xhci->usb2_ports)
1876 return -ENOMEM;
1877
1878 port_index = 0;
1879 for (i = 0; i < num_ports; i++) {
1880 if (xhci->port_array[i] == 0x03 ||
1881 xhci->port_array[i] == 0 ||
1882 xhci->port_array[i] == DUPLICATE_ENTRY)
1883 continue;
1884
1885 xhci->usb2_ports[port_index] =
1886 &xhci->op_regs->port_status_base +
1887 NUM_PORT_REGS*i;
1888 xhci_dbg(xhci, "USB 2.0 port at index %u, "
1889 "addr = %p\n", i,
1890 xhci->usb2_ports[port_index]);
1891 port_index++;
1892 if (port_index == xhci->num_usb2_ports)
1893 break;
1894 }
1895 }
1896 if (xhci->num_usb3_ports) {
1897 xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
1898 xhci->num_usb3_ports, flags);
1899 if (!xhci->usb3_ports)
1900 return -ENOMEM;
1901
1902 port_index = 0;
1903 for (i = 0; i < num_ports; i++)
1904 if (xhci->port_array[i] == 0x03) {
1905 xhci->usb3_ports[port_index] =
1906 &xhci->op_regs->port_status_base +
1907 NUM_PORT_REGS*i;
1908 xhci_dbg(xhci, "USB 3.0 port at index %u, "
1909 "addr = %p\n", i,
1910 xhci->usb3_ports[port_index]);
1911 port_index++;
1912 if (port_index == xhci->num_usb3_ports)
1913 break;
1914 }
1915 }
1916 return 0;
1917}
1918
1919int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
1920{
1921 dma_addr_t dma;
1922 struct device *dev = xhci_to_hcd(xhci)->self.controller;
1923 unsigned int val, val2;
1924 u64 val_64;
1925 struct xhci_segment *seg;
1926 u32 page_size;
1927 int i;
1928
1929 page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
1930 xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
1931 for (i = 0; i < 16; i++) {
1932 if ((0x1 & page_size) != 0)
1933 break;
1934 page_size = page_size >> 1;
1935 }
1936 if (i < 16)
1937 xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
1938 else
1939 xhci_warn(xhci, "WARN: no supported page size\n");
1940 /* Use 4K pages, since that's common and the minimum the HC supports */
1941 xhci->page_shift = 12;
1942 xhci->page_size = 1 << xhci->page_shift;
1943 xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
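	/*
	 * Example: a PAGESIZE register of 0x1 has bit 0 set, so the loop
	 * above exits with i = 0 and the controller supports 2^(0+12) = 4K
	 * pages.
	 */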
1944
1945 /*
1946 * Program the Number of Device Slots Enabled field in the CONFIG
1947 * register with the max value of slots the HC can handle.
1948 */
1949 val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
1950 xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
1951 (unsigned int) val);
1952 val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
1953 val |= (val2 & ~HCS_SLOTS_MASK);
1954 xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
1955 (unsigned int) val);
1956 xhci_writel(xhci, val, &xhci->op_regs->config_reg);
1957
1958 /*
1959 * Section 5.4.8 - doorbell array must be
1960 * "physically contiguous and 64-byte (cache line) aligned".
1961 */
1962 xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
1963 sizeof(*xhci->dcbaa), &dma);
1964 if (!xhci->dcbaa)
1965 goto fail;
1966 memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
1967 xhci->dcbaa->dma = dma;
1968 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
1969 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
1970 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
1971
1972 /*
1973 * Initialize the ring segment pool. The ring must be a contiguous
1974 * structure comprised of TRBs. The TRBs must be 16 byte aligned,
1975 * however, the command ring segment needs 64-byte aligned segments,
1976 * so we pick the greater alignment need.
1977 */
1978 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
1979 SEGMENT_SIZE, 64, xhci->page_size);
1980
1981 /* See Table 46 and Note on Figure 55 */
1982 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
1983 2112, 64, xhci->page_size);
1984 if (!xhci->segment_pool || !xhci->device_pool)
1985 goto fail;
1986
1987 /* Linear stream context arrays don't have any boundary restrictions,
1988 * and only need to be 16-byte aligned.
1989 */
1990 xhci->small_streams_pool =
1991 dma_pool_create("xHCI 256 byte stream ctx arrays",
1992 dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
1993 xhci->medium_streams_pool =
1994 dma_pool_create("xHCI 1KB stream ctx arrays",
1995 dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
1996 /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
1997 * will be allocated with pci_alloc_consistent()
1998 */
1999
2000 if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
2001 goto fail;
2002
	/* Set up the command ring to have one segment for now. */
2004 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
2005 if (!xhci->cmd_ring)
2006 goto fail;
2007 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
2008 xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
2009 (unsigned long long)xhci->cmd_ring->first_seg->dma);
2010
2011 /* Set the address in the Command Ring Control register */
2012 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2013 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2014 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2015 xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%016llx\n",
			(unsigned long long) val_64);
2017 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
2018 xhci_dbg_cmd_ptrs(xhci);
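	/*
	 * Illustrative CRCR composition (made-up address): a first segment
	 * at DMA 0x7f001000 with cycle_state 1 results in writing
	 * (old & CMD_RING_RSVD_BITS) | 0x7f001000 | 0x1, i.e. the ring base
	 * with the ring cycle state bit set and the reserved bits preserved.
	 */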
2019
2020 val = xhci_readl(xhci, &xhci->cap_regs->db_off);
2021 val &= DBOFF_MASK;
2022 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
2023 " from cap regs base addr\n", val);
2024 xhci->dba = (void __iomem *) xhci->cap_regs + val;
2025 xhci_dbg_regs(xhci);
2026 xhci_print_run_regs(xhci);
2027 /* Set ir_set to interrupt register set 0 */
2028 xhci->ir_set = &xhci->run_regs->ir_set[0];
2029
2030 /*
2031 * Event ring setup: Allocate a normal ring, but also setup
2032 * the event ring segment table (ERST). Section 4.9.3.
2033 */
2034 xhci_dbg(xhci, "// Allocating event ring\n");
2035 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
2036 if (!xhci->event_ring)
2037 goto fail;
2038 if (xhci_check_trb_in_td_math(xhci, flags) < 0)
2039 goto fail;
2040
2041 xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
2042 sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
2043 if (!xhci->erst.entries)
2044 goto fail;
2045 xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
2046 (unsigned long long)dma);
2047
2048 memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
2049 xhci->erst.num_entries = ERST_NUM_SEGS;
2050 xhci->erst.erst_dma_addr = dma;
2051 xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
2052 xhci->erst.num_entries,
2053 xhci->erst.entries,
2054 (unsigned long long)xhci->erst.erst_dma_addr);
2055
2056 /* set ring base address and size for each segment table entry */
2057 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
2058 struct xhci_erst_entry *entry = &xhci->erst.entries[val];
2059 entry->seg_addr = cpu_to_le64(seg->dma);
2060 entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
2061 entry->rsvd = 0;
2062 seg = seg->next;
2063 }
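
	/*
	 * Sketch, assuming ERST_NUM_SEGS == 1 (its value in xhci.h here):
	 * the table above then holds a single entry describing the event
	 * ring's only segment base plus a size of TRBS_PER_SEGMENT.
	 */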
2064
2065 /* set ERST count with the number of entries in the segment table */
2066 val = xhci_readl(xhci, &xhci->ir_set->erst_size);
2067 val &= ERST_SIZE_MASK;
2068 val |= ERST_NUM_SEGS;
2069 xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
2070 val);
2071 xhci_writel(xhci, val, &xhci->ir_set->erst_size);
2072
2073 xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
2074 /* set the segment table base address */
2075 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
2076 (unsigned long long)xhci->erst.erst_dma_addr);
2077 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2078 val_64 &= ERST_PTR_MASK;
2079 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
2080 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
2081
2082 /* Set the event ring dequeue address */
2083 xhci_set_hc_event_deq(xhci);
2084 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
2085 xhci_print_ir_set(xhci, 0);
2086
2087 /*
2088 * XXX: Might need to set the Interrupter Moderation Register to
2089 * something other than the default (~1ms minimum between interrupts).
2090 * See section 5.5.1.2.
2091 */
2092 init_completion(&xhci->addr_dev);
2093 for (i = 0; i < MAX_HC_SLOTS; ++i)
2094 xhci->devs[i] = NULL;
2095 for (i = 0; i < USB_MAXCHILDREN; ++i) {
2096 xhci->bus_state[0].resume_done[i] = 0;
2097 xhci->bus_state[1].resume_done[i] = 0;
2098 }
2099
2100 if (scratchpad_alloc(xhci, flags))
2101 goto fail;
2102 if (xhci_setup_port_arrays(xhci, flags))
2103 goto fail;
2104
2105 return 0;
2106
2107fail:
2108 xhci_warn(xhci, "Couldn't initialize memory\n");
2109 xhci_mem_cleanup(xhci);
2110 return -ENOMEM;
2111}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * xHCI host controller driver
4 *
5 * Copyright (C) 2008 Intel Corp.
6 *
7 * Author: Sarah Sharp
8 * Some code borrowed from the Linux EHCI driver.
9 */
10
11#include <linux/usb.h>
12#include <linux/overflow.h>
13#include <linux/pci.h>
14#include <linux/slab.h>
15#include <linux/dmapool.h>
16#include <linux/dma-mapping.h>
17
18#include "xhci.h"
19#include "xhci-trace.h"
20#include "xhci-debugfs.h"
21
22/*
23 * Allocates a generic ring segment from the ring pool, sets the dma address,
24 * initializes the segment to zero, and sets the private next pointer to NULL.
25 *
26 * Section 4.11.1.1:
27 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
28 */
29static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
30 unsigned int max_packet,
31 unsigned int num,
32 gfp_t flags)
33{
34 struct xhci_segment *seg;
35 dma_addr_t dma;
36 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
37
38 seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
39 if (!seg)
40 return NULL;
41
42 seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
43 if (!seg->trbs) {
44 kfree(seg);
45 return NULL;
46 }
47
48 if (max_packet) {
49 seg->bounce_buf = kzalloc_node(max_packet, flags,
50 dev_to_node(dev));
51 if (!seg->bounce_buf) {
52 dma_pool_free(xhci->segment_pool, seg->trbs, dma);
53 kfree(seg);
54 return NULL;
55 }
56 }
57 seg->num = num;
58 seg->dma = dma;
59 seg->next = NULL;
60
61 return seg;
62}
63
64static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
65{
66 if (seg->trbs) {
67 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
68 seg->trbs = NULL;
69 }
70 kfree(seg->bounce_buf);
71 kfree(seg);
72}
73
74static void xhci_ring_segments_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
75{
76 struct xhci_segment *seg, *next;
77
78 ring->last_seg->next = NULL;
79 seg = ring->first_seg;
80
81 while (seg) {
82 next = seg->next;
83 xhci_segment_free(xhci, seg);
84 seg = next;
85 }
86}
87
88/*
89 * Only for transfer and command rings where driver is the producer, not for
90 * event rings.
91 *
92 * Change the last TRB in the segment to be a Link TRB which points to the
93 * DMA address of the next segment. The caller needs to set any Link TRB
94 * related flags, such as End TRB, Toggle Cycle, and no snoop.
95 */
96static void xhci_set_link_trb(struct xhci_segment *seg, bool chain_links)
97{
98 union xhci_trb *trb;
99 u32 val;
100
101 if (!seg || !seg->next)
102 return;
103
104 trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
105
106 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
107 val = le32_to_cpu(trb->link.control);
108 val &= ~TRB_TYPE_BITMASK;
109 val |= TRB_TYPE(TRB_LINK);
110 if (chain_links)
111 val |= TRB_CHAIN;
112 trb->link.control = cpu_to_le32(val);
113 trb->link.segment_ptr = cpu_to_le64(seg->next->dma);
114}
115
116static void xhci_initialize_ring_segments(struct xhci_hcd *xhci, struct xhci_ring *ring)
117{
118 struct xhci_segment *seg;
119 bool chain_links;
120
121 if (ring->type == TYPE_EVENT)
122 return;
123
124 chain_links = xhci_link_chain_quirk(xhci, ring->type);
125 xhci_for_each_ring_seg(ring->first_seg, seg)
126 xhci_set_link_trb(seg, chain_links);
127
128 /* See section 4.9.2.1 and 6.4.4.1 */
129 ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE);
130}
131
132/*
133 * Link the src ring segments to the dst ring.
134 * Set Toggle Cycle for the new ring if needed.
135 */
136static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *src, struct xhci_ring *dst)
137{
138 struct xhci_segment *seg;
139 bool chain_links;
140
141 if (!src || !dst)
142 return;
143
144 /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
145 if (dst->cycle_state == 0) {
146 xhci_for_each_ring_seg(src->first_seg, seg) {
147 for (int i = 0; i < TRBS_PER_SEGMENT; i++)
148 seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
149 }
150 }
151
152 src->last_seg->next = dst->enq_seg->next;
153 dst->enq_seg->next = src->first_seg;
154 if (dst->type != TYPE_EVENT) {
155 chain_links = xhci_link_chain_quirk(xhci, dst->type);
156 xhci_set_link_trb(dst->enq_seg, chain_links);
157 xhci_set_link_trb(src->last_seg, chain_links);
158 }
159 dst->num_segs += src->num_segs;
160
161 if (dst->enq_seg == dst->last_seg) {
162 if (dst->type != TYPE_EVENT)
163 dst->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
164 &= ~cpu_to_le32(LINK_TOGGLE);
165
166 dst->last_seg = src->last_seg;
167 } else if (dst->type != TYPE_EVENT) {
168 src->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control &= ~cpu_to_le32(LINK_TOGGLE);
169 }
170
171 for (seg = dst->enq_seg; seg != dst->last_seg; seg = seg->next)
172 seg->next->num = seg->num + 1;
173}
174
175/*
176 * We need a radix tree for mapping physical addresses of TRBs to which stream
177 * ID they belong to. We need to do this because the host controller won't tell
178 * us which stream ring the TRB came from. We could store the stream ID in an
179 * event data TRB, but that doesn't help us for the cancellation case, since the
180 * endpoint may stop before it reaches that event data TRB.
181 *
182 * The radix tree maps the upper portion of the TRB DMA address to a ring
183 * segment that has the same upper portion of DMA addresses. For example, say I
184 * have segments of size 1KB, that are always 1KB aligned. A segment may
185 * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
186 * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
187 * pass the radix tree a key to get the right stream ID:
188 *
189 * 0x10c90fff >> 10 = 0x43243
190 * 0x10c912c0 >> 10 = 0x43244
191 * 0x10c91400 >> 10 = 0x43245
192 *
193 * Obviously, only those TRBs with DMA addresses that are within the segment
194 * will make the radix tree return the stream ID for that ring.
195 *
196 * Caveats for the radix tree:
197 *
 * The radix tree uses an unsigned long as a key. On 32-bit systems, an
199 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
200 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
201 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
202 * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
203 * extended systems (where the DMA address can be bigger than 32-bits),
204 * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
205 */
206static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
207 struct xhci_ring *ring,
208 struct xhci_segment *seg,
209 gfp_t mem_flags)
210{
211 unsigned long key;
212 int ret;
213
214 key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
215 /* Skip any segments that were already added. */
216 if (radix_tree_lookup(trb_address_map, key))
217 return 0;
218
219 ret = radix_tree_maybe_preload(mem_flags);
220 if (ret)
221 return ret;
222 ret = radix_tree_insert(trb_address_map,
223 key, ring);
224 radix_tree_preload_end();
225 return ret;
226}
227
228static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
229 struct xhci_segment *seg)
230{
231 unsigned long key;
232
233 key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
234 if (radix_tree_lookup(trb_address_map, key))
235 radix_tree_delete(trb_address_map, key);
236}
237
238static int xhci_update_stream_segment_mapping(
239 struct radix_tree_root *trb_address_map,
240 struct xhci_ring *ring,
241 struct xhci_segment *first_seg,
242 gfp_t mem_flags)
243{
244 struct xhci_segment *seg;
245 struct xhci_segment *failed_seg;
246 int ret;
247
248 if (WARN_ON_ONCE(trb_address_map == NULL))
249 return 0;
250
251 xhci_for_each_ring_seg(first_seg, seg) {
252 ret = xhci_insert_segment_mapping(trb_address_map,
253 ring, seg, mem_flags);
254 if (ret)
255 goto remove_streams;
256 }
257
258 return 0;
259
260remove_streams:
261 failed_seg = seg;
262 xhci_for_each_ring_seg(first_seg, seg) {
263 xhci_remove_segment_mapping(trb_address_map, seg);
264 if (seg == failed_seg)
265 return ret;
266 }
267
268 return ret;
269}
270
271static void xhci_remove_stream_mapping(struct xhci_ring *ring)
272{
273 struct xhci_segment *seg;
274
275 if (WARN_ON_ONCE(ring->trb_address_map == NULL))
276 return;
277
278 xhci_for_each_ring_seg(ring->first_seg, seg)
279 xhci_remove_segment_mapping(ring->trb_address_map, seg);
280}
281
282static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
283{
284 return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
285 ring->first_seg, mem_flags);
286}
287
288/* XXX: Do we need the hcd structure in all these functions? */
289void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
290{
291 if (!ring)
292 return;
293
294 trace_xhci_ring_free(ring);
295
296 if (ring->first_seg) {
297 if (ring->type == TYPE_STREAM)
298 xhci_remove_stream_mapping(ring);
299 xhci_ring_segments_free(xhci, ring);
300 }
301
302 kfree(ring);
303}
304
305void xhci_initialize_ring_info(struct xhci_ring *ring)
306{
307 /* The ring is empty, so the enqueue pointer == dequeue pointer */
308 ring->enqueue = ring->first_seg->trbs;
309 ring->enq_seg = ring->first_seg;
310 ring->dequeue = ring->enqueue;
311 ring->deq_seg = ring->first_seg;
312 /* The ring is initialized to 0. The producer must write 1 to the cycle
313 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
314 * compare CCS to the cycle bit to check ownership, so CCS = 1.
315 *
316 * New rings are initialized with cycle state equal to 1; if we are
317 * handling ring expansion, set the cycle state equal to the old ring.
318 */
319 ring->cycle_state = 1;
320
321 /*
	 * Each segment ends in a link TRB, and we leave one extra TRB per
	 * ring for software accounting purposes.
324 */
325 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
326}
327EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);
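
/*
 * Example of the accounting above: a 2-segment ring with
 * TRBS_PER_SEGMENT == 256 has 2 * 255 enqueueable TRBs (one link TRB
 * per segment) minus one more reserved for software, so num_trbs_free
 * starts out at 509.
 */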
328
329/* Allocate segments and link them for a ring */
330static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, gfp_t flags)
331{
332 struct xhci_segment *prev;
333 unsigned int num = 0;
334
335 prev = xhci_segment_alloc(xhci, ring->bounce_buf_len, num, flags);
336 if (!prev)
337 return -ENOMEM;
338 num++;
339
340 ring->first_seg = prev;
341 while (num < ring->num_segs) {
342 struct xhci_segment *next;
343
344 next = xhci_segment_alloc(xhci, ring->bounce_buf_len, num, flags);
345 if (!next)
346 goto free_segments;
347
348 prev->next = next;
349 prev = next;
350 num++;
351 }
352 ring->last_seg = prev;
353
354 ring->last_seg->next = ring->first_seg;
355 return 0;
356
357free_segments:
358 ring->last_seg = prev;
359 xhci_ring_segments_free(xhci, ring);
360 return -ENOMEM;
361}
362
363/*
364 * Create a new ring with zero or more segments.
365 *
366 * Link each segment together into a ring.
367 * Set the end flag and the cycle toggle bit on the last segment.
368 * See section 4.9.1 and figures 15 and 16.
369 */
370struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
371 enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
372{
373 struct xhci_ring *ring;
374 int ret;
375 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
376
377 ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
378 if (!ring)
379 return NULL;
380
381 ring->num_segs = num_segs;
382 ring->bounce_buf_len = max_packet;
383 INIT_LIST_HEAD(&ring->td_list);
384 ring->type = type;
385 if (num_segs == 0)
386 return ring;
387
388 ret = xhci_alloc_segments_for_ring(xhci, ring, flags);
389 if (ret)
390 goto fail;
391
392 xhci_initialize_ring_segments(xhci, ring);
393 xhci_initialize_ring_info(ring);
394 trace_xhci_ring_alloc(ring);
395 return ring;
396
397fail:
398 kfree(ring);
399 return NULL;
400}
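
/*
 * Typical use, as a sketch (callers pick their own ring type and
 * sizes): a two-segment bulk transfer ring with 1024-byte bounce
 * buffers would be created with
 *
 *	ring = xhci_ring_alloc(xhci, 2, TYPE_BULK, 1024, GFP_KERNEL);
 *
 * and released again with xhci_ring_free().
 */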
401
402void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
403 struct xhci_virt_device *virt_dev,
404 unsigned int ep_index)
405{
406 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
407 virt_dev->eps[ep_index].ring = NULL;
408}
409
410/*
411 * Expand an existing ring.
 * Allocate a new ring with the requested number of segments and link
 * the two rings.
413 */
414int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
415 unsigned int num_new_segs, gfp_t flags)
416{
417 struct xhci_ring new_ring;
418 int ret;
419
420 if (num_new_segs == 0)
421 return 0;
422
423 new_ring.num_segs = num_new_segs;
424 new_ring.bounce_buf_len = ring->bounce_buf_len;
425 new_ring.type = ring->type;
426 ret = xhci_alloc_segments_for_ring(xhci, &new_ring, flags);
427 if (ret)
428 return -ENOMEM;
429
430 xhci_initialize_ring_segments(xhci, &new_ring);
431
432 if (ring->type == TYPE_STREAM) {
433 ret = xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
434 new_ring.first_seg, flags);
435 if (ret)
436 goto free_segments;
437 }
438
439 xhci_link_rings(xhci, &new_ring, ring);
440 trace_xhci_ring_expansion(ring);
441 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
442 "ring expansion succeed, now has %d segments",
443 ring->num_segs);
444
445 return 0;
446
447free_segments:
448 xhci_ring_segments_free(xhci, &new_ring);
449 return ret;
450}
451
452struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
453 int type, gfp_t flags)
454{
455 struct xhci_container_ctx *ctx;
456 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
457
458 if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
459 return NULL;
460
461 ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
462 if (!ctx)
463 return NULL;
464
465 ctx->type = type;
466 ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
467 if (type == XHCI_CTX_TYPE_INPUT)
468 ctx->size += CTX_SIZE(xhci->hcc_params);
469
470 ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
471 if (!ctx->bytes) {
472 kfree(ctx);
473 return NULL;
474 }
475 return ctx;
476}
477
478void xhci_free_container_ctx(struct xhci_hcd *xhci,
479 struct xhci_container_ctx *ctx)
480{
481 if (!ctx)
482 return;
483 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
484 kfree(ctx);
485}
486
487struct xhci_input_control_ctx *xhci_get_input_control_ctx(
488 struct xhci_container_ctx *ctx)
489{
490 if (ctx->type != XHCI_CTX_TYPE_INPUT)
491 return NULL;
492
493 return (struct xhci_input_control_ctx *)ctx->bytes;
494}
495
496struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
497 struct xhci_container_ctx *ctx)
498{
499 if (ctx->type == XHCI_CTX_TYPE_DEVICE)
500 return (struct xhci_slot_ctx *)ctx->bytes;
501
502 return (struct xhci_slot_ctx *)
503 (ctx->bytes + CTX_SIZE(xhci->hcc_params));
504}
505
506struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
507 struct xhci_container_ctx *ctx,
508 unsigned int ep_index)
509{
510 /* increment ep index by offset of start of ep ctx array */
511 ep_index++;
512 if (ctx->type == XHCI_CTX_TYPE_INPUT)
513 ep_index++;
514
515 return (struct xhci_ep_ctx *)
516 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
517}
518EXPORT_SYMBOL_GPL(xhci_get_ep_ctx);
519
520/***************** Streams structures manipulation *************************/
521
522static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
523 unsigned int num_stream_ctxs,
524 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
525{
526 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
527 size_t size = array_size(sizeof(struct xhci_stream_ctx), num_stream_ctxs);
528
529 if (size > MEDIUM_STREAM_ARRAY_SIZE)
530 dma_free_coherent(dev, size, stream_ctx, dma);
531 else if (size > SMALL_STREAM_ARRAY_SIZE)
532 dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma);
533 else
534 dma_pool_free(xhci->small_streams_pool, stream_ctx, dma);
535}
536
537/*
538 * The stream context array for each endpoint with bulk streams enabled can
539 * vary in size, based on:
540 * - how many streams the endpoint supports,
541 * - the maximum primary stream array size the host controller supports,
542 * - and how many streams the device driver asks for.
543 *
544 * The stream context array must be a power of 2, and can be as small as
545 * 64 bytes or as large as 1MB.
546 */
547static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
548 unsigned int num_stream_ctxs, dma_addr_t *dma,
549 gfp_t mem_flags)
550{
551 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
552 size_t size = array_size(sizeof(struct xhci_stream_ctx), num_stream_ctxs);
553
554 if (size > MEDIUM_STREAM_ARRAY_SIZE)
555 return dma_alloc_coherent(dev, size, dma, mem_flags);
556 if (size > SMALL_STREAM_ARRAY_SIZE)
557 return dma_pool_zalloc(xhci->medium_streams_pool, mem_flags, dma);
558 else
559 return dma_pool_zalloc(xhci->small_streams_pool, mem_flags, dma);
560}
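
/*
 * Example: sixteen 16-byte stream contexts need 256 bytes, which fits
 * SMALL_STREAM_ARRAY_SIZE and is served from the small dma_pool;
 * arrays beyond MEDIUM_STREAM_ARRAY_SIZE fall through to
 * dma_alloc_coherent() instead.
 */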
561
562struct xhci_ring *xhci_dma_to_transfer_ring(
563 struct xhci_virt_ep *ep,
564 u64 address)
565{
566 if (ep->ep_state & EP_HAS_STREAMS)
567 return radix_tree_lookup(&ep->stream_info->trb_address_map,
568 address >> TRB_SEGMENT_SHIFT);
569 return ep->ring;
570}
571
572/*
573 * Change an endpoint's internal structure so it supports stream IDs. The
574 * number of requested streams includes stream 0, which cannot be used by device
575 * drivers.
576 *
577 * The number of stream contexts in the stream context array may be bigger than
578 * the number of streams the driver wants to use. This is because the number of
579 * stream context array entries must be a power of two.
580 */
581struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
582 unsigned int num_stream_ctxs,
583 unsigned int num_streams,
584 unsigned int max_packet, gfp_t mem_flags)
585{
586 struct xhci_stream_info *stream_info;
587 u32 cur_stream;
588 struct xhci_ring *cur_ring;
589 u64 addr;
590 int ret;
591 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
592
593 xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n",
594 num_streams, num_stream_ctxs);
595 if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
596 xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
597 return NULL;
598 }
599 xhci->cmd_ring_reserved_trbs++;
600
601 stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
602 dev_to_node(dev));
603 if (!stream_info)
604 goto cleanup_trbs;
605
606 stream_info->num_streams = num_streams;
607 stream_info->num_stream_ctxs = num_stream_ctxs;
608
609 /* Initialize the array of virtual pointers to stream rings. */
610 stream_info->stream_rings = kcalloc_node(
611 num_streams, sizeof(struct xhci_ring *), mem_flags,
612 dev_to_node(dev));
613 if (!stream_info->stream_rings)
614 goto cleanup_info;
615
616 /* Initialize the array of DMA addresses for stream rings for the HW. */
617 stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
618 num_stream_ctxs, &stream_info->ctx_array_dma,
619 mem_flags);
620 if (!stream_info->stream_ctx_array)
621 goto cleanup_ring_array;
622
623 /* Allocate everything needed to free the stream rings later */
624 stream_info->free_streams_command =
625 xhci_alloc_command_with_ctx(xhci, true, mem_flags);
626 if (!stream_info->free_streams_command)
627 goto cleanup_ctx;
628
629 INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
630
631 /* Allocate rings for all the streams that the driver will use,
632 * and add their segment DMA addresses to the radix tree.
633 * Stream 0 is reserved.
634 */
636 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
637 stream_info->stream_rings[cur_stream] =
638 xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags);
639 cur_ring = stream_info->stream_rings[cur_stream];
640 if (!cur_ring)
641 goto cleanup_rings;
642 cur_ring->stream_id = cur_stream;
643 cur_ring->trb_address_map = &stream_info->trb_address_map;
644 /* Set deq ptr, cycle bit, and stream context type */
645 addr = cur_ring->first_seg->dma |
646 SCT_FOR_CTX(SCT_PRI_TR) |
647 cur_ring->cycle_state;
648 stream_info->stream_ctx_array[cur_stream].stream_ring =
649 cpu_to_le64(addr);
650 xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, addr);
651
652 ret = xhci_update_stream_mapping(cur_ring, mem_flags);
653
654 trace_xhci_alloc_stream_info_ctx(stream_info, cur_stream);
655 if (ret) {
656 xhci_ring_free(xhci, cur_ring);
657 stream_info->stream_rings[cur_stream] = NULL;
658 goto cleanup_rings;
659 }
660 }
661 /* Leave the other unused stream ring pointers in the stream context
662 * array initialized to zero. This will cause the xHC to give us an
	 * error if the device asks for a stream ID we don't have set up (if it
664 * was any other way, the host controller would assume the ring is
665 * "empty" and wait forever for data to be queued to that stream ID).
666 */
667
668 return stream_info;
669
670cleanup_rings:
671 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
672 cur_ring = stream_info->stream_rings[cur_stream];
673 if (cur_ring) {
674 xhci_ring_free(xhci, cur_ring);
675 stream_info->stream_rings[cur_stream] = NULL;
676 }
677 }
678 xhci_free_command(xhci, stream_info->free_streams_command);
679cleanup_ctx:
680 xhci_free_stream_ctx(xhci,
681 stream_info->num_stream_ctxs,
682 stream_info->stream_ctx_array,
683 stream_info->ctx_array_dma);
684cleanup_ring_array:
685 kfree(stream_info->stream_rings);
686cleanup_info:
687 kfree(stream_info);
688cleanup_trbs:
689 xhci->cmd_ring_reserved_trbs--;
690 return NULL;
691}

/*
693 * Sets the MaxPStreams field and the Linear Stream Array field.
694 * Sets the dequeue pointer to the stream context array.
695 */
696void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
697 struct xhci_ep_ctx *ep_ctx,
698 struct xhci_stream_info *stream_info)
699{
700 u32 max_primary_streams;
701 /* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using. Must be in 2^(MaxPStreams + 1) format.
703 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
704 */
705 max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
706 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
707 "Setting number of stream ctx array entries to %u",
708 1 << (max_primary_streams + 1));
709 ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
710 ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
711 | EP_HAS_LSA);
712 ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
713}
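
/*
 * Worked example: num_stream_ctxs == 256 gives fls(256) - 2 = 7, so
 * MaxPStreams is written as 7 and the controller expects
 * 2^(7 + 1) = 256 stream context entries, matching the array size.
 */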
714
715/*
716 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
717 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
718 * not at the beginning of the ring).
719 */
720void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
721 struct xhci_virt_ep *ep)
722{
723 dma_addr_t addr;
724 ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
725 addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
726 ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
727}
728
/* Frees all stream contexts associated with the endpoint.
730 *
731 * Caller should fix the endpoint context streams fields.
732 */
733void xhci_free_stream_info(struct xhci_hcd *xhci,
734 struct xhci_stream_info *stream_info)
735{
736 int cur_stream;
737 struct xhci_ring *cur_ring;
738
739 if (!stream_info)
740 return;
741
742 for (cur_stream = 1; cur_stream < stream_info->num_streams;
743 cur_stream++) {
744 cur_ring = stream_info->stream_rings[cur_stream];
745 if (cur_ring) {
746 xhci_ring_free(xhci, cur_ring);
747 stream_info->stream_rings[cur_stream] = NULL;
748 }
749 }
750 xhci_free_command(xhci, stream_info->free_streams_command);
751 xhci->cmd_ring_reserved_trbs--;
752 if (stream_info->stream_ctx_array)
753 xhci_free_stream_ctx(xhci,
754 stream_info->num_stream_ctxs,
755 stream_info->stream_ctx_array,
756 stream_info->ctx_array_dma);
757
758 kfree(stream_info->stream_rings);
759 kfree(stream_info);
760}
761
762
763/***************** Device context manipulation *************************/
764
765static void xhci_free_tt_info(struct xhci_hcd *xhci,
766 struct xhci_virt_device *virt_dev,
767 int slot_id)
768{
769 struct list_head *tt_list_head;
770 struct xhci_tt_bw_info *tt_info, *next;
771 bool slot_found = false;
772
773 /* If the device never made it past the Set Address stage,
774 * it may not have the root hub port pointer set correctly.
775 */
776 if (!virt_dev->rhub_port) {
777 xhci_dbg(xhci, "Bad rhub port.\n");
778 return;
779 }
780
781 tt_list_head = &(xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts);
782 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
783 /* Multi-TT hubs will have more than one entry */
784 if (tt_info->slot_id == slot_id) {
785 slot_found = true;
786 list_del(&tt_info->tt_list);
787 kfree(tt_info);
788 } else if (slot_found) {
789 break;
790 }
791 }
792}
793
794int xhci_alloc_tt_info(struct xhci_hcd *xhci,
795 struct xhci_virt_device *virt_dev,
796 struct usb_device *hdev,
797 struct usb_tt *tt, gfp_t mem_flags)
798{
799 struct xhci_tt_bw_info *tt_info;
800 unsigned int num_ports;
801 int i, j;
802 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
803
804 if (!tt->multi)
805 num_ports = 1;
806 else
807 num_ports = hdev->maxchild;
808
809 for (i = 0; i < num_ports; i++, tt_info++) {
810 struct xhci_interval_bw_table *bw_table;
811
812 tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
813 dev_to_node(dev));
814 if (!tt_info)
815 goto free_tts;
816 INIT_LIST_HEAD(&tt_info->tt_list);
817 list_add(&tt_info->tt_list,
818 &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts);
819 tt_info->slot_id = virt_dev->udev->slot_id;
820 if (tt->multi)
821 tt_info->ttport = i+1;
822 bw_table = &tt_info->bw_table;
823 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
824 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
825 }
826 return 0;
827
828free_tts:
829 xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
830 return -ENOMEM;
831}
832
833
834/* All the xhci_tds in the ring's TD list should be freed at this point.
835 * Should be called with xhci->lock held if there is any chance the TT lists
836 * will be manipulated by the configure endpoint, allocate device, or update
837 * hub functions while this function is removing the TT entries from the list.
838 */
839void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
840{
841 struct xhci_virt_device *dev;
842 int i;
843 int old_active_eps = 0;
844
845 /* Slot ID 0 is reserved */
846 if (slot_id == 0 || !xhci->devs[slot_id])
847 return;
848
849 dev = xhci->devs[slot_id];
850
851 xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
852 if (!dev)
853 return;
854
855 trace_xhci_free_virt_device(dev);
856
857 if (dev->tt_info)
858 old_active_eps = dev->tt_info->active_eps;
859
860 for (i = 0; i < 31; i++) {
861 if (dev->eps[i].ring)
862 xhci_ring_free(xhci, dev->eps[i].ring);
863 if (dev->eps[i].stream_info)
864 xhci_free_stream_info(xhci,
865 dev->eps[i].stream_info);
866 /*
867 * Endpoints are normally deleted from the bandwidth list when
868 * endpoints are dropped, before device is freed.
869 * If host is dying or being removed then endpoints aren't
870 * dropped cleanly, so delete the endpoint from list here.
871 * Only applicable for hosts with software bandwidth checking.
872 */
873
874 if (!list_empty(&dev->eps[i].bw_endpoint_list)) {
875 list_del_init(&dev->eps[i].bw_endpoint_list);
876 xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n",
877 slot_id, i);
878 }
879 }
880 /* If this is a hub, free the TT(s) from the TT list */
881 xhci_free_tt_info(xhci, dev, slot_id);
882 /* If necessary, update the number of active TTs on this root port */
883 xhci_update_tt_active_eps(xhci, dev, old_active_eps);
884
885 if (dev->in_ctx)
886 xhci_free_container_ctx(xhci, dev->in_ctx);
887 if (dev->out_ctx)
888 xhci_free_container_ctx(xhci, dev->out_ctx);
889
890 if (dev->udev && dev->udev->slot_id)
891 dev->udev->slot_id = 0;
892 if (dev->rhub_port && dev->rhub_port->slot_id == slot_id)
893 dev->rhub_port->slot_id = 0;
894 kfree(xhci->devs[slot_id]);
895 xhci->devs[slot_id] = NULL;
896}
897
898/*
899 * Free a virt_device structure.
900 * If the virt_device added a tt_info (a hub) and has children pointing to
901 * that tt_info, then free the child first. Recursive.
902 * We can't rely on udev at this point to find child-parent relationships.
903 */
904static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
905{
906 struct xhci_virt_device *vdev;
907 struct list_head *tt_list_head;
908 struct xhci_tt_bw_info *tt_info, *next;
909 int i;
910
911 vdev = xhci->devs[slot_id];
912 if (!vdev)
913 return;
914
915 if (!vdev->rhub_port) {
916 xhci_dbg(xhci, "Bad rhub port.\n");
917 goto out;
918 }
919
920 tt_list_head = &(xhci->rh_bw[vdev->rhub_port->hw_portnum].tts);
921 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
922 /* is this a hub device that added a tt_info to the tts list */
923 if (tt_info->slot_id == slot_id) {
924 /* are any devices using this tt_info? */
925 for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
926 vdev = xhci->devs[i];
927 if (vdev && (vdev->tt_info == tt_info))
928 xhci_free_virt_devices_depth_first(
929 xhci, i);
930 }
931 }
932 }
933out:
934 /* we are now at a leaf device */
935 xhci_debugfs_remove_slot(xhci, slot_id);
936 xhci_free_virt_device(xhci, slot_id);
937}
938
939int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
940 struct usb_device *udev, gfp_t flags)
941{
942 struct xhci_virt_device *dev;
943 int i;
944
945 /* Slot ID 0 is reserved */
946 if (slot_id == 0 || xhci->devs[slot_id]) {
947 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
948 return 0;
949 }
950
951 dev = kzalloc(sizeof(*dev), flags);
952 if (!dev)
953 return 0;
954
955 dev->slot_id = slot_id;
956
957 /* Allocate the (output) device context that will be used in the HC. */
958 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
959 if (!dev->out_ctx)
960 goto fail;
961
962 xhci_dbg(xhci, "Slot %d output ctx = 0x%pad (dma)\n", slot_id, &dev->out_ctx->dma);
963
964 /* Allocate the (input) device context for address device command */
965 dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
966 if (!dev->in_ctx)
967 goto fail;
968
969 xhci_dbg(xhci, "Slot %d input ctx = 0x%pad (dma)\n", slot_id, &dev->in_ctx->dma);
970
971 /* Initialize the cancellation and bandwidth list for each ep */
972 for (i = 0; i < 31; i++) {
973 dev->eps[i].ep_index = i;
974 dev->eps[i].vdev = dev;
975 dev->eps[i].xhci = xhci;
976 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
977 INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
978 }
979
980 /* Allocate endpoint 0 ring */
981 dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags);
982 if (!dev->eps[0].ring)
983 goto fail;
984
985 dev->udev = udev;
986
987 /* Point to output device context in dcbaa. */
988 xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
989 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
990 slot_id,
991 &xhci->dcbaa->dev_context_ptrs[slot_id],
992 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
993
994 trace_xhci_alloc_virt_device(dev);
995
996 xhci->devs[slot_id] = dev;
997
998 return 1;
999fail:
1000
1001 if (dev->in_ctx)
1002 xhci_free_container_ctx(xhci, dev->in_ctx);
1003 if (dev->out_ctx)
1004 xhci_free_container_ctx(xhci, dev->out_ctx);
1005 kfree(dev);
1006
1007 return 0;
1008}
1009
1010void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
1011 struct usb_device *udev)
1012{
1013 struct xhci_virt_device *virt_dev;
1014 struct xhci_ep_ctx *ep0_ctx;
1015 struct xhci_ring *ep_ring;
1016
1017 virt_dev = xhci->devs[udev->slot_id];
1018 ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
1019 ep_ring = virt_dev->eps[0].ring;
1020 /*
1021 * FIXME we don't keep track of the dequeue pointer very well after a
1022 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
1023 * host to our enqueue pointer. This should only be called after a
1024 * configured device has reset, so all control transfers should have
1025 * been completed or cancelled before the reset.
1026 */
1027 ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
1028 ep_ring->enqueue)
1029 | ep_ring->cycle_state);
1030}
1031
1032/*
1033 * The xHCI roothub may have ports of differing speeds in any order in the port
1034 * status registers.
1035 *
1036 * The xHCI hardware wants to know the roothub port that the USB device
1037 * is attached to (or the roothub port its ancestor hub is attached to). All we
1038 * know is the index of that port under either the USB 2.0 or the USB 3.0
1039 * roothub, but that doesn't give us the real index into the HW port status
1040 * registers.
1041 */
1042static struct xhci_port *xhci_find_rhub_port(struct xhci_hcd *xhci, struct usb_device *udev)
1043{
1044 struct usb_device *top_dev;
1045 struct xhci_hub *rhub;
1046 struct usb_hcd *hcd;
1047
1048 if (udev->speed >= USB_SPEED_SUPER)
1049 hcd = xhci_get_usb3_hcd(xhci);
1050 else
1051 hcd = xhci->main_hcd;
1052
1053 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1054 top_dev = top_dev->parent)
1055 /* Found device below root hub */;
1056
1057 rhub = xhci_get_rhub(hcd);
1058 return rhub->ports[top_dev->portnum - 1];
1059}
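
/*
 * Example: for a topology of roothub -> external hub -> device, the
 * loop above walks up until top_dev is the external hub (the device
 * directly below the root hub), whose portnum then indexes the
 * matching roothub's port array.
 */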
1060
1061/* Setup an xHCI virtual device for a Set Address command */
1062int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
1063{
1064 struct xhci_virt_device *dev;
1065 struct xhci_ep_ctx *ep0_ctx;
1066 struct xhci_slot_ctx *slot_ctx;
1067 u32 max_packets;
1068
1069 dev = xhci->devs[udev->slot_id];
1070 /* Slot ID 0 is reserved */
1071 if (udev->slot_id == 0 || !dev) {
1072 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
1073 udev->slot_id);
1074 return -EINVAL;
1075 }
1076 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
1077 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
1078
1079 /* 3) Only the control endpoint is valid - one endpoint context */
1080 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
1081 switch (udev->speed) {
1082 case USB_SPEED_SUPER_PLUS:
1083 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
1084 max_packets = MAX_PACKET(512);
1085 break;
1086 case USB_SPEED_SUPER:
1087 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
1088 max_packets = MAX_PACKET(512);
1089 break;
1090 case USB_SPEED_HIGH:
1091 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
1092 max_packets = MAX_PACKET(64);
1093 break;
1094 /* USB core guesses at a 64-byte max packet first for FS devices */
1095 case USB_SPEED_FULL:
1096 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
1097 max_packets = MAX_PACKET(64);
1098 break;
1099 case USB_SPEED_LOW:
1100 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
1101 max_packets = MAX_PACKET(8);
1102 break;
1103 default:
1104 /* Speed was set earlier, this shouldn't happen. */
1105 return -EINVAL;
1106 }
1107 /* Find the root hub port this device is under */
1108 dev->rhub_port = xhci_find_rhub_port(xhci, udev);
1109 if (!dev->rhub_port)
1110 return -EINVAL;
1111 /* Slot ID is set to the device directly below the root hub */
1112 if (!udev->parent->parent)
1113 dev->rhub_port->slot_id = udev->slot_id;
1114 slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(dev->rhub_port->hw_portnum + 1));
1115 xhci_dbg(xhci, "Slot ID %d: HW portnum %d, hcd portnum %d\n",
1116 udev->slot_id, dev->rhub_port->hw_portnum, dev->rhub_port->hcd_portnum);
1117
1118 /* Find the right bandwidth table that this device will be a part of.
1119 * If this is a full speed device attached directly to a root port (or a
	 * descendant of one), it counts as a primary bandwidth domain, not a
1121 * secondary bandwidth domain under a TT. An xhci_tt_info structure
1122 * will never be created for the HS root hub.
1123 */
1124 if (!udev->tt || !udev->tt->hub->parent) {
1125 dev->bw_table = &xhci->rh_bw[dev->rhub_port->hw_portnum].bw_table;
1126 } else {
1127 struct xhci_root_port_bw_info *rh_bw;
1128 struct xhci_tt_bw_info *tt_bw;
1129
1130 rh_bw = &xhci->rh_bw[dev->rhub_port->hw_portnum];
1131 /* Find the right TT. */
1132 list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
1133 if (tt_bw->slot_id != udev->tt->hub->slot_id)
1134 continue;
1135
1136 if (!dev->udev->tt->multi ||
1137 (udev->tt->multi &&
1138 tt_bw->ttport == dev->udev->ttport)) {
1139 dev->bw_table = &tt_bw->bw_table;
1140 dev->tt_info = tt_bw;
1141 break;
1142 }
1143 }
1144 if (!dev->tt_info)
1145 xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
1146 }
1147
1148 /* Is this a LS/FS device under an external HS hub? */
1149 if (udev->tt && udev->tt->hub->parent) {
1150 slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
1151 (udev->ttport << 8));
1152 if (udev->tt->multi)
1153 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
1154 }
1155 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
1156 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
1157
1158 /* Step 4 - ring already allocated */
1159 /* Step 5 */
1160 ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
1161
1162 /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
1163 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
1164 max_packets);
1165
1166 ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
1167 dev->eps[0].ring->cycle_state);
1168
1169 trace_xhci_setup_addressable_virt_device(dev);
1170
1171 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
1172
1173 return 0;
1174}
1175
1176/*
1177 * Convert interval expressed as 2^(bInterval - 1) == interval into
1178 * straight exponent value 2^n == interval.
1180 */
1181static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1182 struct usb_host_endpoint *ep)
1183{
1184 unsigned int interval;
1185
1186 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
1187 if (interval != ep->desc.bInterval - 1)
1188 dev_warn(&udev->dev,
1189 "ep %#x - rounding interval to %d %sframes\n",
1190 ep->desc.bEndpointAddress,
1191 1 << interval,
1192 udev->speed == USB_SPEED_FULL ? "" : "micro");
1193
1194 if (udev->speed == USB_SPEED_FULL) {
1195 /*
1196 * Full speed isoc endpoints specify interval in frames,
1197 * not microframes. We are using microframes everywhere,
1198 * so adjust accordingly.
1199 */
1200 interval += 3; /* 1 frame = 2^3 uframes */
1201 }
1202
1203 return interval;
1204}
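
/*
 * Examples: a SuperSpeed interrupt endpoint with bInterval = 4 yields
 * an exponent of 3, i.e. a service interval of 2^3 * 125us = 1ms. A
 * full-speed isoc endpoint with the same bInterval gets 3 added to
 * convert frames to microframes: 2^6 * 125us = 8ms.
 */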
1205
1206/*
1207 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
1208 * microframes, rounded down to nearest power of 2.
1209 */
1210static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1211 struct usb_host_endpoint *ep, unsigned int desc_interval,
1212 unsigned int min_exponent, unsigned int max_exponent)
1213{
1214 unsigned int interval;
1215
1216 interval = fls(desc_interval) - 1;
1217 interval = clamp_val(interval, min_exponent, max_exponent);
1218 if ((1 << interval) != desc_interval)
1219 dev_dbg(&udev->dev,
1220 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1221 ep->desc.bEndpointAddress,
1222 1 << interval,
1223 desc_interval);
1224
1225 return interval;
1226}
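
/*
 * Example: a high-speed bulk endpoint advertising a NAK interval of 9
 * microframes is rounded down to 2^(fls(9) - 1) = 8 microframes, and
 * the rounding is reported via dev_dbg() above.
 */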
1227
1228static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1229 struct usb_host_endpoint *ep)
1230{
1231 if (ep->desc.bInterval == 0)
1232 return 0;
1233 return xhci_microframes_to_exponent(udev, ep,
1234 ep->desc.bInterval, 0, 15);
1235}
1236
1238static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1239 struct usb_host_endpoint *ep)
1240{
1241 return xhci_microframes_to_exponent(udev, ep,
1242 ep->desc.bInterval * 8, 3, 10);
1243}
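
/*
 * Example: a low-speed interrupt endpoint with bInterval = 10 frames
 * becomes 80 microframes; fls(80) - 1 = 6 is within the [3, 10] clamp,
 * so the endpoint is serviced every 2^6 * 125us = 8ms.
 */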
1244
1245/* Return the polling or NAK interval.
1246 *
1247 * The polling interval is expressed in "microframes". If xHCI's Interval field
1248 * is set to N, it will service the endpoint every 2^(Interval)*125us.
1249 *
1250 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
1251 * is set to 0.
1252 */
1253static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1254 struct usb_host_endpoint *ep)
1255{
1256 unsigned int interval = 0;
1257
1258 switch (udev->speed) {
1259 case USB_SPEED_HIGH:
1260 /* Max NAK rate */
1261 if (usb_endpoint_xfer_control(&ep->desc) ||
1262 usb_endpoint_xfer_bulk(&ep->desc)) {
1263 interval = xhci_parse_microframe_interval(udev, ep);
1264 break;
1265 }
1266 fallthrough; /* SS and HS isoc/int have same decoding */
1267
1268 case USB_SPEED_SUPER_PLUS:
1269 case USB_SPEED_SUPER:
1270 if (usb_endpoint_xfer_int(&ep->desc) ||
1271 usb_endpoint_xfer_isoc(&ep->desc)) {
1272 interval = xhci_parse_exponent_interval(udev, ep);
1273 }
1274 break;
1275
1276 case USB_SPEED_FULL:
1277 if (usb_endpoint_xfer_isoc(&ep->desc)) {
1278 interval = xhci_parse_exponent_interval(udev, ep);
1279 break;
1280 }
1281 /*
1282 * Fall through for interrupt endpoint interval decoding
1283 * since it uses the same rules as low speed interrupt
1284 * endpoints.
1285 */
1286 fallthrough;
1287
1288 case USB_SPEED_LOW:
1289 if (usb_endpoint_xfer_int(&ep->desc) ||
1290 usb_endpoint_xfer_isoc(&ep->desc)) {
1291
1292 interval = xhci_parse_frame_interval(udev, ep);
1293 }
1294 break;
1295
1296 default:
1297 BUG();
1298 }
1299 return interval;
1300}
1301
1302/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
1303 * High speed endpoint descriptors can define "the number of additional
1304 * transaction opportunities per microframe", but that goes in the Max Burst
1305 * endpoint context field.
1306 */
1307static u32 xhci_get_endpoint_mult(struct usb_device *udev,
1308 struct usb_host_endpoint *ep)
1309{
1310 if (udev->speed < USB_SPEED_SUPER ||
1311 !usb_endpoint_xfer_isoc(&ep->desc))
1312 return 0;
1313 return ep->ss_ep_comp.bmAttributes;
1314}
1315
1316static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
1317 struct usb_host_endpoint *ep)
1318{
1319 /* Super speed and Plus have max burst in ep companion desc */
1320 if (udev->speed >= USB_SPEED_SUPER)
1321 return ep->ss_ep_comp.bMaxBurst;
1322
1323 if (udev->speed == USB_SPEED_HIGH &&
1324 (usb_endpoint_xfer_isoc(&ep->desc) ||
1325 usb_endpoint_xfer_int(&ep->desc)))
1326 return usb_endpoint_maxp_mult(&ep->desc) - 1;
1327
1328 return 0;
1329}
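
/*
 * Example: a high-speed isoc endpoint with wMaxPacketSize 0x1400
 * (1024 bytes plus two extra transaction opportunities) has
 * usb_endpoint_maxp_mult() == 3, so Max Burst is reported as 2.
 */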

static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
	int in;

	in = usb_endpoint_dir_in(&ep->desc);

	switch (usb_endpoint_type(&ep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		return CTRL_EP;
	case USB_ENDPOINT_XFER_BULK:
		return in ? BULK_IN_EP : BULK_OUT_EP;
	case USB_ENDPOINT_XFER_ISOC:
		return in ? ISOC_IN_EP : ISOC_OUT_EP;
	case USB_ENDPOINT_XFER_INT:
		return in ? INT_IN_EP : INT_OUT_EP;
	}
	return 0;
}

/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	/* SuperSpeedPlus Isoc ep sending over 48k per esit */
	if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
	    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
		return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);

	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
	if (udev->speed >= USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = usb_endpoint_maxp_mult(&ep->desc);
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * max_burst;
}
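
/*
 * Worked example (illustrative): a high-speed isoc endpoint with a
 * 1024-byte max packet and 3 transactions per microframe has a max ESIT
 * payload of 1024 * 3 = 3072 bytes; a SuperSpeed endpoint instead reports
 * the value directly in wBytesPerInterval of its companion descriptor.
 */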

/* Set up an endpoint with one ring segment. Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	enum xhci_ring_type ring_type;
	u32 max_esit_payload;
	u32 endpoint_type;
	unsigned int max_burst;
	unsigned int interval;
	unsigned int mult;
	unsigned int avg_trb_len;
	unsigned int err_count = 0;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	endpoint_type = xhci_get_endpoint_type(ep);
	if (!endpoint_type)
		return -EINVAL;

	ring_type = usb_endpoint_type(&ep->desc);

	/*
	 * Get values to fill the endpoint context, mostly from ep descriptor.
	 * The average TRB buffer length for bulk endpoints is unclear as we
	 * have no clue on scatter gather list entry size. For Isoc and Int,
	 * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
	 */
	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
	interval = xhci_get_endpoint_interval(udev, ep);

	/* Periodic endpoint bInterval limit quirk */
	if (usb_endpoint_xfer_int(&ep->desc) ||
	    usb_endpoint_xfer_isoc(&ep->desc)) {
		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
		    udev->speed >= USB_SPEED_HIGH &&
		    interval >= 7) {
			interval = 6;
		}
	}

	mult = xhci_get_endpoint_mult(udev, ep);
	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = xhci_get_endpoint_max_burst(udev, ep);
	avg_trb_len = max_esit_payload;

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc, set CErr = 3 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		err_count = 3;
	/* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
	if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (udev->speed == USB_SPEED_HIGH)
			max_packet = 512;
		if (udev->speed == USB_SPEED_FULL) {
			max_packet = rounddown_pow_of_two(max_packet);
			max_packet = clamp_val(max_packet, 8, 64);
		}
	}
	/* xHCI 1.0 and 1.1 indicate that ctrl ep avg TRB Length should be 8 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
		avg_trb_len = 8;
	/* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
		mult = 0;

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring)
		return -ENOMEM;

	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;

	/* Fill the endpoint context */
	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
				      EP_INTERVAL(interval) |
				      EP_MULT(mult));
	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
				       MAX_PACKET(max_packet) |
				       MAX_BURST(max_burst) |
				       ERROR_COUNT(err_count));
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
				  ep_ring->cycle_state);

	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
				      EP_AVG_TRB_LENGTH(avg_trb_len));

	return 0;
}
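
/*
 * Putting the helpers together (illustrative, not from a real trace): a
 * high-speed bulk endpoint with bInterval = 0 and wMaxPacketSize = 512
 * ends up with interval = 0, mult = 0, max_burst = 0, max_packet = 512
 * and err_count = 3, plus a fresh two-segment transfer ring whose first
 * segment DMA address, OR'ed with the cycle state, seeds ep_ctx->deq.
 */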

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}

void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; i++) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
			    ep_type != ISOC_IN_EP &&
			    ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}
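
/*
 * Example decode (illustrative): an added interrupt IN endpoint whose
 * input context holds Interval = 4, Mult = 0 and Max Burst = 2 produces
 * bw_info->ep_interval = 4, bw_info->mult = 1 and bw_info->num_packets = 3,
 * since the context fields are zero-based but the root hub bandwidth
 * interval table wants one-based counts.
 */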

/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
	if (xhci->quirks & XHCI_MTK_HOST) {
		in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
		in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
	}
}

/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the slot context
 * and then issue a configure endpoint command. Only the context entries
 * field matters, but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocating %d scratchpad buffers", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
				dev_to_node(dev));
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
				array_size(sizeof(u64), num_sp),
				&xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
					flags, dev_to_node(dev));
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
					       flags);
		if (!buf)
			goto fail_sp4;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
	}

	return 0;

 fail_sp4:
	while (i--)
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_array[i]);

	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	dma_free_coherent(dev, array_size(sizeof(u64), num_sp),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}
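
/*
 * Example (illustrative): if HCS_MAX_SCRATCHPAD() reports 4 buffers and
 * the page size is 4K, this allocates a 4-entry array of 64-bit DMA
 * pointers plus four 4K pages, and points DCBAA slot 0 at the array, as
 * the spec requires for the Scratchpad Buffer Array.
 */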

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(dev, xhci->page_size,
				  xhci->scratchpad->sp_buffers[i],
				  xhci->scratchpad->sp_array[i]);
	}
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(dev, array_size(sizeof(u64), num_sp),
			  xhci->scratchpad->sp_array,
			  xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
	if (!command)
		return NULL;

	if (allocate_completion) {
		command->completion =
			kzalloc_node(sizeof(struct completion), mem_flags,
				dev_to_node(dev));
		if (!command->completion) {
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	/* set default timeout to 5000 ms */
	command->timeout_ms = XHCI_CMD_DEFAULT_TIMEOUT;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}

struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
	if (!command)
		return NULL;

	command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
						   mem_flags);
	if (!command->in_ctx) {
		kfree(command->completion);
		kfree(command);
		return NULL;
	}
	return command;
}

void xhci_urb_free_priv(struct urb_priv *urb_priv)
{
	kfree(urb_priv);
}

void xhci_free_command(struct xhci_hcd *xhci, struct xhci_command *command)
{
	xhci_free_container_ctx(xhci, command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

static int xhci_alloc_erst(struct xhci_hcd *xhci,
		struct xhci_ring *evt_ring,
		struct xhci_erst *erst,
		gfp_t flags)
{
	size_t size;
	unsigned int val;
	struct xhci_segment *seg;
	struct xhci_erst_entry *entry;

	size = array_size(sizeof(struct xhci_erst_entry), evt_ring->num_segs);
	erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
					   size, &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = evt_ring->num_segs;

	seg = evt_ring->first_seg;
	for (val = 0; val < evt_ring->num_segs; val++) {
		entry = &erst->entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	return 0;
}
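
/*
 * Resulting table for a hypothetical two-segment event ring:
 * erst->entries[0] = { seg_addr = seg 0 DMA, seg_size = TRBS_PER_SEGMENT }
 * erst->entries[1] = { seg_addr = seg 1 DMA, seg_size = TRBS_PER_SEGMENT }
 * The controller walks this Event Ring Segment Table to locate the memory
 * where it may post event TRBs.
 */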

static void
xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
	u32 tmp;

	if (!ir)
		return;

	/*
	 * Clean out interrupter registers except ERSTBA. Clearing either the
	 * low or high 32 bits of ERSTBA immediately causes the controller to
	 * dereference the partially cleared 64 bit address, causing an IOMMU
	 * error.
	 */
	if (ir->ir_set) {
		tmp = readl(&ir->ir_set->erst_size);
		tmp &= ERST_SIZE_MASK;
		writel(tmp, &ir->ir_set->erst_size);

		xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue);
	}
}

static void
xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t erst_size;

	if (!ir)
		return;

	erst_size = array_size(sizeof(struct xhci_erst_entry), ir->erst.num_entries);
	if (ir->erst.entries)
		dma_free_coherent(dev, erst_size,
				  ir->erst.entries,
				  ir->erst.erst_dma_addr);
	ir->erst.entries = NULL;

	/* free interrupter event ring */
	if (ir->event_ring)
		xhci_ring_free(xhci, ir->event_ring);

	ir->event_ring = NULL;

	kfree(ir);
}

void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned int intr_num;

	spin_lock_irq(&xhci->lock);

	/* interrupter 0 is primary interrupter, don't touch it */
	if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) {
		xhci_dbg(xhci, "Invalid secondary interrupter, can't remove\n");
		spin_unlock_irq(&xhci->lock);
		return;
	}

	intr_num = ir->intr_num;

	xhci_remove_interrupter(xhci, ir);
	xhci->interrupters[intr_num] = NULL;

	spin_unlock_irq(&xhci->lock);

	xhci_free_interrupter(xhci, ir);
}
EXPORT_SYMBOL_GPL(xhci_remove_secondary_interrupter);

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int i, j, num_ports;

	cancel_delayed_work_sync(&xhci->cmd_timer);

	for (i = 0; xhci->interrupters && i < xhci->max_interrupters; i++) {
		if (xhci->interrupters[i]) {
			xhci_remove_interrupter(xhci, xhci->interrupters[i]);
			xhci_free_interrupter(xhci, xhci->interrupters[i]);
			xhci->interrupters[i] = NULL;
		}
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed interrupters");

	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
	xhci_cleanup_command_queue(xhci);

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;
			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
		xhci_free_virt_devices_depth_first(xhci, i);

	dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

	dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");

	dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed small stream array pool");

	dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed medium stream array pool");

	if (xhci->dcbaa)
		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
				  xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	if (!xhci->rh_bw)
		goto no_bw;

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;
		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

no_bw:
	xhci->cmd_ring_reserved_trbs = 0;
	xhci->usb2_rhub.num_ports = 0;
	xhci->usb3_rhub.num_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_rhub.ports);
	kfree(xhci->usb3_rhub.ports);
	kfree(xhci->hw_ports);
	kfree(xhci->rh_bw);
	for (i = 0; i < xhci->num_port_caps; i++)
		kfree(xhci->port_caps[i].psi);
	kfree(xhci->port_caps);
	kfree(xhci->interrupters);
	xhci->num_port_caps = 0;

	xhci->usb2_rhub.ports = NULL;
	xhci->usb3_rhub.ports = NULL;
	xhci->hw_ports = NULL;
	xhci->rh_bw = NULL;
	xhci->port_caps = NULL;
	xhci->interrupters = NULL;

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->usb2_rhub.bus_state.bus_suspended = 0;
	xhci->usb3_rhub.bus_state.bus_suspended = 0;
}

static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
				   ir->event_ring->dequeue);
	if (!deq)
		xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write event ring dequeue pointer, preserving EHB bit");
	xhci_write_64(xhci, deq & ERST_PTR_MASK, &ir->ir_set->erst_dequeue);
}

static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, int max_caps)
{
	u32 temp, port_offset, port_count;
	int i;
	u8 major_revision, minor_revision, tmp_minor_revision;
	struct xhci_hub *rhub;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct xhci_port_cap *port_cap;

	temp = readl(addr);
	major_revision = XHCI_EXT_PORT_MAJOR(temp);
	minor_revision = XHCI_EXT_PORT_MINOR(temp);

	if (major_revision == 0x03) {
		rhub = &xhci->usb3_rhub;
		/*
		 * Some hosts incorrectly use sub-minor version for minor
		 * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
		 * for bcdUSB 0x310). Since there is no USB release with sub
		 * minor version 0x301 to 0x309, we can assume that they are
		 * incorrect and fix it here.
		 */
		if (minor_revision > 0x00 && minor_revision < 0x10)
			minor_revision <<= 4;
		/*
		 * Some Zhaoxin xHCI controllers follow the USB 3.1 spec but
		 * only support Gen1 speeds.
		 */
		if (xhci->quirks & XHCI_ZHAOXIN_HOST) {
			tmp_minor_revision = minor_revision;
			minor_revision = 0;
		}

	} else if (major_revision <= 0x02) {
		rhub = &xhci->usb2_rhub;
	} else {
		xhci_warn(xhci, "Ignoring unknown port speed, Ext Cap %p, revision = 0x%x\n",
			  addr, major_revision);
		/* Ignoring port protocol we can't understand. FIXME */
		return;
	}

	/* Port offset and count in the third dword, see section 7.2 */
	temp = readl(addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Ext Cap %p, port offset = %u, count = %u, revision = 0x%x",
			addr, port_offset, port_count, major_revision);
	/* Port count includes the current port offset */
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* WTF? "Valid values are '1' to MaxPorts" */
		return;

	port_cap = &xhci->port_caps[xhci->num_port_caps++];
	if (xhci->num_port_caps > max_caps)
		return;

	port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);

	if (port_cap->psi_count) {
		port_cap->psi = kcalloc_node(port_cap->psi_count,
					     sizeof(*port_cap->psi),
					     GFP_KERNEL, dev_to_node(dev));
		if (!port_cap->psi)
			port_cap->psi_count = 0;

		port_cap->psi_uid_count++;
		for (i = 0; i < port_cap->psi_count; i++) {
			port_cap->psi[i] = readl(addr + 4 + i);

			/* count unique ID values, two consecutive entries can
			 * have the same ID if link is asymmetric
			 */
			if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
				  XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
				port_cap->psi_uid_count++;

			if (xhci->quirks & XHCI_ZHAOXIN_HOST &&
			    major_revision == 0x03 &&
			    XHCI_EXT_PORT_PSIV(port_cap->psi[i]) >= 5)
				minor_revision = tmp_minor_revision;

			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
				 XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
				 XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
				 XHCI_EXT_PORT_PLT(port_cap->psi[i]),
				 XHCI_EXT_PORT_PFD(port_cap->psi[i]),
				 XHCI_EXT_PORT_LP(port_cap->psi[i]),
				 XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
		}
	}

	rhub->maj_rev = major_revision;

	if (rhub->min_rev < minor_revision)
		rhub->min_rev = minor_revision;

	port_cap->maj_rev = major_revision;
	port_cap->min_rev = minor_revision;
	port_cap->protocol_caps = temp;

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
	    (temp & XHCI_HLC)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 1.0: support USB2 hardware lpm");
		xhci->hw_lpm_support = 1;
	}

	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		struct xhci_port *hw_port = &xhci->hw_ports[i];
		/* Duplicate entry. Ignore the port if the revisions differ. */
		if (hw_port->rhub) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p, port %u\n", addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, duplicated as USB %u\n",
				  hw_port->rhub->maj_rev, major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (hw_port->rhub != rhub &&
			    hw_port->hcd_portnum != DUPLICATE_ENTRY) {
				hw_port->rhub->num_ports--;
				hw_port->hcd_portnum = DUPLICATE_ENTRY;
			}
			continue;
		}
		hw_port->rhub = rhub;
		hw_port->port_cap = port_cap;
		rhub->num_ports++;
	}
	/* FIXME: Should we disable ports not in the Extended Capabilities? */
}
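
/*
 * Hypothetical capability decode (made-up register values): a first dword
 * of 0x03100000 gives major_revision 0x03 and minor_revision 0x10, i.e. a
 * USB 3.1 protocol capability; a third dword of 0x00000402 gives
 * XHCI_EXT_PORT_OFF() = 2 and XHCI_EXT_PORT_COUNT() = 4, so hw_ports[1]
 * through hw_ports[4] (zero-based) are claimed for the USB3 roothub.
 */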

static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
		struct xhci_hub *rhub, gfp_t flags)
{
	int port_index = 0;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!rhub->num_ports)
		return;
	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
			flags, dev_to_node(dev));
	if (!rhub->ports)
		return;

	for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
		if (xhci->hw_ports[i].rhub != rhub ||
		    xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
			continue;
		xhci->hw_ports[i].hcd_portnum = port_index;
		rhub->ports[port_index] = &xhci->hw_ports[i];
		port_index++;
		if (port_index == rhub->num_ports)
			break;
	}
}

/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities"
 * that specify what speeds each port is supposed to be. We can't count on
 * the port speed bits in the PORTSC register being correct until a device
 * is connected, but we need to set up the two fake roothubs with the
 * correct number of USB 3.0 and USB 2.0 ports at host controller
 * initialization time.
 */
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	void __iomem *base;
	u32 offset;
	unsigned int num_ports;
	int i, j;
	int cap_count = 0;
	u32 cap_start;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
				flags, dev_to_node(dev));
	if (!xhci->hw_ports)
		return -ENOMEM;

	for (i = 0; i < num_ports; i++) {
		xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
			NUM_PORT_REGS * i;
		xhci->hw_ports[i].hw_portnum = i;

		init_completion(&xhci->hw_ports[i].rexit_done);
		init_completion(&xhci->hw_ports[i].u3exit_done);
	}

	xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
				   dev_to_node(dev));
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	base = &xhci->cap_regs->hc_capbase;

	cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
	if (!cap_start) {
		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
		return -ENODEV;
	}

	offset = cap_start;
	/* count extended protocol capability entries for later caching */
	while (offset) {
		cap_count++;
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
	}

	xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
				       flags, dev_to_node(dev));
	if (!xhci->port_caps)
		return -ENOMEM;

	offset = cap_start;

	while (offset) {
		xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
		if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
		    num_ports)
			break;
		offset = xhci_find_next_ext_cap(base, offset,
						XHCI_EXT_CAPS_PROTOCOL);
	}
	if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
			xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);

	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 3.0 roothub ports to %u.",
				USB_SS_MAXPORTS);
		xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
	}
	if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 2.0 roothub ports to %u.",
				USB_MAXCHILDREN);
		xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
	}

	if (!xhci->usb2_rhub.num_ports)
		xhci_info(xhci, "USB2 root hub has no ports\n");

	if (!xhci->usb3_rhub.num_ports)
		xhci_info(xhci, "USB3 root hub has no ports\n");

	xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
	xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);

	return 0;
}

static struct xhci_interrupter *
xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct xhci_interrupter *ir;
	unsigned int max_segs;
	int ret;

	if (!segs)
		segs = ERST_DEFAULT_SEGS;

	max_segs = BIT(HCS_ERST_MAX(xhci->hcs_params2));
	segs = min(segs, max_segs);

	ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
	if (!ir)
		return NULL;

	ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags);
	if (!ir->event_ring) {
		xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
		kfree(ir);
		return NULL;
	}

	ret = xhci_alloc_erst(xhci, ir->event_ring, &ir->erst, flags);
	if (ret) {
		xhci_warn(xhci, "Failed to allocate interrupter erst\n");
		xhci_ring_free(xhci, ir->event_ring);
		kfree(ir);
		return NULL;
	}

	return ir;
}

static int
xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
		     unsigned int intr_num)
{
	u64 erst_base;
	u32 erst_size;

	if (intr_num >= xhci->max_interrupters) {
		xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n",
			  intr_num, xhci->max_interrupters);
		return -EINVAL;
	}

	if (xhci->interrupters[intr_num]) {
		xhci_warn(xhci, "Interrupter %d already set up\n", intr_num);
		return -EINVAL;
	}

	xhci->interrupters[intr_num] = ir;
	ir->intr_num = intr_num;
	ir->ir_set = &xhci->run_regs->ir_set[intr_num];

	/* set ERST count with the number of entries in the segment table */
	erst_size = readl(&ir->ir_set->erst_size);
	erst_size &= ERST_SIZE_MASK;
	erst_size |= ir->event_ring->num_segs;
	writel(erst_size, &ir->ir_set->erst_size);

	erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
	erst_base &= ERST_BASE_RSVDP;
	erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP;
	if (xhci->quirks & XHCI_WRITE_64_HI_LO)
		hi_lo_writeq(erst_base, &ir->ir_set->erst_base);
	else
		xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);

	/* Set the event ring dequeue address of this interrupter */
	xhci_set_hc_event_deq(xhci, ir);

	return 0;
}

struct xhci_interrupter *
xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
				  u32 imod_interval)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir;
	unsigned int i;
	int err = -ENOSPC;

	if (!xhci->interrupters || xhci->max_interrupters <= 1)
		return NULL;

	ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL);
	if (!ir)
		return NULL;

	spin_lock_irq(&xhci->lock);

	/* Find available secondary interrupter, interrupter 0 is reserved for primary */
	for (i = 1; i < xhci->max_interrupters; i++) {
		if (xhci->interrupters[i] == NULL) {
			err = xhci_add_interrupter(xhci, ir, i);
			break;
		}
	}

	spin_unlock_irq(&xhci->lock);

	if (err) {
		xhci_warn(xhci, "Failed to add secondary interrupter, max interrupters %d\n",
			  xhci->max_interrupters);
		xhci_free_interrupter(xhci, ir);
		return NULL;
	}

	err = xhci_set_interrupter_moderation(ir, imod_interval);
	if (err)
		xhci_warn(xhci, "Failed to set interrupter %d moderation to %uns\n",
			  i, imod_interval);

	xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n",
		 i, xhci->max_interrupters);

	return ir;
}
EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
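
/*
 * Minimal usage sketch for the exported pair above (hypothetical caller,
 * e.g. an offload/sideband client driver; the single segment and 4096ns
 * moderation value are made up):
 *
 *	struct xhci_interrupter *ir;
 *
 *	ir = xhci_create_secondary_interrupter(hcd, 1, 4096);
 *	if (!ir)
 *		return -ENOMEM;
 *	... point the offload engine at ir's event ring, then later ...
 *	xhci_remove_secondary_interrupter(hcd, ir);
 */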

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_interrupter *ir;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	dma_addr_t dma;
	unsigned int val, val2;
	u64 val_64;
	u32 page_size, temp;
	int i;

	INIT_LIST_HEAD(&xhci->cmd_list);

	/* init command timeout work */
	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
	init_completion(&xhci->cmd_ring_stop_completion);

	page_size = readl(&xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size register = 0x%x", page_size);
	/* ffs() is 1-based; bit 0 set means a 4K (1 << 12) page size */
	i = ffs(page_size);
	if (i && i <= 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Supported page size of %iK", (1 << (i + 11)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"HCD page size set to %iK", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// xHC can handle at most %d device slots.", val);
	val2 = readl(&xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting Max device slots reg = 0x%x.", val);
	writel(val, &xhci->op_regs->config_reg);

	/*
	 * xHCI section 5.4.6 - Device Context array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			flags);
	if (!xhci->dcbaa)
		goto fail;
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Device context base array address = 0x%pad (DMA), %p (virt)",
			&xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool. The ring must be a contiguous
	 * structure comprised of TRBs. The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments
	 * and our use of dma addresses in the trb_address_map radix tree needs
	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
	 */
	if (xhci->quirks & XHCI_TRB_OVERFETCH)
		/* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */
		xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
				TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
	else
		xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
				TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent()
	 */

	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad",
			&xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%016llx", val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);

	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = readl(&xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Doorbell array is located at offset 0x%x from cap regs base addr",
			val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;

	/* Allocate and set up primary interrupter 0 with an event ring. */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocating primary event ring");
	xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters),
					  flags, dev_to_node(dev));
	if (!xhci->interrupters)
		goto fail;

	ir = xhci_alloc_interrupter(xhci, 0, flags);
	if (!ir)
		goto fail;

	if (xhci_add_interrupter(xhci, ir, 0))
		goto fail;

	ir->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;

	for (i = 0; i < MAX_HC_SLOTS; i++)
		xhci->devs[i] = NULL;

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
	temp = readl(&xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	writel(temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_halt(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}