Linux kernel source listing: drivers/usb/host/xhci-dbg.c (xHCI host controller debug helpers).
Two versions of the file appear on this page: kernel v3.1 (immediately below) and kernel v4.10.11 (second listing, further down).

--- Version v3.1 ---
  1/*
  2 * xHCI host controller driver
  3 *
  4 * Copyright (C) 2008 Intel Corp.
  5 *
  6 * Author: Sarah Sharp
  7 * Some code borrowed from the Linux EHCI driver.
  8 *
  9 * This program is free software; you can redistribute it and/or modify
 10 * it under the terms of the GNU General Public License version 2 as
 11 * published by the Free Software Foundation.
 12 *
 13 * This program is distributed in the hope that it will be useful, but
 14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 15 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 16 * for more details.
 17 *
 18 * You should have received a copy of the GNU General Public License
 19 * along with this program; if not, write to the Free Software Foundation,
 20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 21 */
 22
 23#include "xhci.h"
 24
 25#define XHCI_INIT_VALUE 0x0
 26
 27/* Add verbose debugging later, just print everything for now */
 28
 29void xhci_dbg_regs(struct xhci_hcd *xhci)
 30{
 31	u32 temp;
 32
 33	xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
 34			xhci->cap_regs);
 35	temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
 36	xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
 37			&xhci->cap_regs->hc_capbase, temp);
 38	xhci_dbg(xhci, "//   CAPLENGTH: 0x%x\n",
 39			(unsigned int) HC_LENGTH(temp));
 40#if 0
 41	xhci_dbg(xhci, "//   HCIVERSION: 0x%x\n",
 42			(unsigned int) HC_VERSION(temp));
 43#endif
 44
 45	xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
 46
 47	temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
 48	xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
 49			&xhci->cap_regs->run_regs_off,
 50			(unsigned int) temp & RTSOFF_MASK);
 51	xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
 52
 53	temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
 54	xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
 55	xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
 56}
 57
 58static void xhci_print_cap_regs(struct xhci_hcd *xhci)
 59{
 60	u32 temp;
 
 61
 62	xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
 63
 64	temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
 
 65	xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
 66			(unsigned int) temp);
 67	xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
 68			(unsigned int) HC_LENGTH(temp));
 69	xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
 70			(unsigned int) HC_VERSION(temp));
 71
 72	temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
 73	xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
 74			(unsigned int) temp);
 75	xhci_dbg(xhci, "  Max device slots: %u\n",
 76			(unsigned int) HCS_MAX_SLOTS(temp));
 77	xhci_dbg(xhci, "  Max interrupters: %u\n",
 78			(unsigned int) HCS_MAX_INTRS(temp));
 79	xhci_dbg(xhci, "  Max ports: %u\n",
 80			(unsigned int) HCS_MAX_PORTS(temp));
 81
 82	temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
 83	xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
 84			(unsigned int) temp);
 85	xhci_dbg(xhci, "  Isoc scheduling threshold: %u\n",
 86			(unsigned int) HCS_IST(temp));
 87	xhci_dbg(xhci, "  Maximum allowed segments in event ring: %u\n",
 88			(unsigned int) HCS_ERST_MAX(temp));
 89
 90	temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
 91	xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
 92			(unsigned int) temp);
 93	xhci_dbg(xhci, "  Worst case U1 device exit latency: %u\n",
 94			(unsigned int) HCS_U1_LATENCY(temp));
 95	xhci_dbg(xhci, "  Worst case U2 device exit latency: %u\n",
 96			(unsigned int) HCS_U2_LATENCY(temp));
 97
 98	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
 99	xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
100	xhci_dbg(xhci, "  HC generates %s bit addresses\n",
101			HCC_64BIT_ADDR(temp) ? "64" : "32");
 
 
 
 
102	/* FIXME */
103	xhci_dbg(xhci, "  FIXME: more HCCPARAMS debugging\n");
104
105	temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
106	xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
 
 
 
 
 
 
 
 
 
 
 
 
107}
108
109static void xhci_print_command_reg(struct xhci_hcd *xhci)
110{
111	u32 temp;
112
113	temp = xhci_readl(xhci, &xhci->op_regs->command);
114	xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
115	xhci_dbg(xhci, "  HC is %s\n",
116			(temp & CMD_RUN) ? "running" : "being stopped");
117	xhci_dbg(xhci, "  HC has %sfinished hard reset\n",
118			(temp & CMD_RESET) ? "not " : "");
119	xhci_dbg(xhci, "  Event Interrupts %s\n",
120			(temp & CMD_EIE) ? "enabled " : "disabled");
121	xhci_dbg(xhci, "  Host System Error Interrupts %s\n",
122			(temp & CMD_EIE) ? "enabled " : "disabled");
123	xhci_dbg(xhci, "  HC has %sfinished light reset\n",
124			(temp & CMD_LRESET) ? "not " : "");
125}
126
127static void xhci_print_status(struct xhci_hcd *xhci)
128{
129	u32 temp;
130
131	temp = xhci_readl(xhci, &xhci->op_regs->status);
132	xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
133	xhci_dbg(xhci, "  Event ring is %sempty\n",
134			(temp & STS_EINT) ? "not " : "");
135	xhci_dbg(xhci, "  %sHost System Error\n",
136			(temp & STS_FATAL) ? "WARNING: " : "No ");
137	xhci_dbg(xhci, "  HC is %s\n",
138			(temp & STS_HALT) ? "halted" : "running");
139}
140
141static void xhci_print_op_regs(struct xhci_hcd *xhci)
142{
143	xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
144	xhci_print_command_reg(xhci);
145	xhci_print_status(xhci);
146}
147
148static void xhci_print_ports(struct xhci_hcd *xhci)
149{
150	__le32 __iomem *addr;
151	int i, j;
152	int ports;
153	char *names[NUM_PORT_REGS] = {
154		"status",
155		"power",
156		"link",
157		"reserved",
158	};
159
160	ports = HCS_MAX_PORTS(xhci->hcs_params1);
161	addr = &xhci->op_regs->port_status_base;
162	for (i = 0; i < ports; i++) {
163		for (j = 0; j < NUM_PORT_REGS; ++j) {
164			xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
165					addr, names[j],
166					(unsigned int) xhci_readl(xhci, addr));
167			addr++;
168		}
169	}
170}
171
172void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
173{
174	struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
175	void __iomem *addr;
176	u32 temp;
177	u64 temp_64;
178
179	addr = &ir_set->irq_pending;
180	temp = xhci_readl(xhci, addr);
181	if (temp == XHCI_INIT_VALUE)
182		return;
183
184	xhci_dbg(xhci, "  %p: ir_set[%i]\n", ir_set, set_num);
185
186	xhci_dbg(xhci, "  %p: ir_set.pending = 0x%x\n", addr,
187			(unsigned int)temp);
188
189	addr = &ir_set->irq_control;
190	temp = xhci_readl(xhci, addr);
191	xhci_dbg(xhci, "  %p: ir_set.control = 0x%x\n", addr,
192			(unsigned int)temp);
193
194	addr = &ir_set->erst_size;
195	temp = xhci_readl(xhci, addr);
196	xhci_dbg(xhci, "  %p: ir_set.erst_size = 0x%x\n", addr,
197			(unsigned int)temp);
198
199	addr = &ir_set->rsvd;
200	temp = xhci_readl(xhci, addr);
201	if (temp != XHCI_INIT_VALUE)
202		xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
203				addr, (unsigned int)temp);
204
205	addr = &ir_set->erst_base;
206	temp_64 = xhci_read_64(xhci, addr);
207	xhci_dbg(xhci, "  %p: ir_set.erst_base = @%08llx\n",
208			addr, temp_64);
209
210	addr = &ir_set->erst_dequeue;
211	temp_64 = xhci_read_64(xhci, addr);
212	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue = @%08llx\n",
213			addr, temp_64);
214}
215
216void xhci_print_run_regs(struct xhci_hcd *xhci)
217{
218	u32 temp;
219	int i;
220
221	xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
222	temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
223	xhci_dbg(xhci, "  %p: Microframe index = 0x%x\n",
224			&xhci->run_regs->microframe_index,
225			(unsigned int) temp);
226	for (i = 0; i < 7; ++i) {
227		temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
228		if (temp != XHCI_INIT_VALUE)
229			xhci_dbg(xhci, "  WARN: %p: Rsvd[%i] = 0x%x\n",
230					&xhci->run_regs->rsvd[i],
231					i, (unsigned int) temp);
232	}
233}
234
/* Dump capability, operational, and port registers in one call. */
void xhci_print_registers(struct xhci_hcd *xhci)
{
	xhci_print_cap_regs(xhci);
	xhci_print_op_regs(xhci);
	xhci_print_ports(xhci);
}
241
242void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
243{
244	int i;
245	for (i = 0; i < 4; ++i)
246		xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
247				i*4, trb->generic.field[i]);
248}
249
250/**
251 * Debug a transfer request block (TRB).
252 */
253void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
254{
255	u64	address;
256	u32	type = le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK;
257
258	switch (type) {
259	case TRB_TYPE(TRB_LINK):
260		xhci_dbg(xhci, "Link TRB:\n");
261		xhci_print_trb_offsets(xhci, trb);
262
263		address = le64_to_cpu(trb->link.segment_ptr);
264		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
265
266		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
267			 GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target)));
268		xhci_dbg(xhci, "Cycle bit = %u\n",
269			 le32_to_cpu(trb->link.control) & TRB_CYCLE);
270		xhci_dbg(xhci, "Toggle cycle bit = %u\n",
271			 le32_to_cpu(trb->link.control) & LINK_TOGGLE);
272		xhci_dbg(xhci, "No Snoop bit = %u\n",
273			 le32_to_cpu(trb->link.control) & TRB_NO_SNOOP);
274		break;
275	case TRB_TYPE(TRB_TRANSFER):
276		address = le64_to_cpu(trb->trans_event.buffer);
277		/*
278		 * FIXME: look at flags to figure out if it's an address or if
279		 * the data is directly in the buffer field.
280		 */
281		xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
282		break;
283	case TRB_TYPE(TRB_COMPLETION):
284		address = le64_to_cpu(trb->event_cmd.cmd_trb);
285		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
286		xhci_dbg(xhci, "Completion status = %u\n",
287			 GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
288		xhci_dbg(xhci, "Flags = 0x%x\n",
289			 le32_to_cpu(trb->event_cmd.flags));
290		break;
291	default:
292		xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
293				(unsigned int) type>>10);
294		xhci_print_trb_offsets(xhci, trb);
295		break;
296	}
297}
298
299/**
300 * Debug a segment with an xHCI ring.
301 *
302 * @return The Link TRB of the segment, or NULL if there is no Link TRB
303 * (which is a bug, since all segments must have a Link TRB).
304 *
305 * Prints out all TRBs in the segment, even those after the Link TRB.
306 *
307 * XXX: should we print out TRBs that the HC owns?  As long as we don't
308 * write, that should be fine...  We shouldn't expect that the memory pointed to
309 * by the TRB is valid at all.  Do we care about ones the HC owns?  Probably,
310 * for HC debugging.
311 */
312void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
313{
314	int i;
315	u64 addr = seg->dma;
316	union xhci_trb *trb = seg->trbs;
317
318	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
319		trb = &seg->trbs[i];
320		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr,
321			 lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
322			 upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
323			 le32_to_cpu(trb->link.intr_target),
324			 le32_to_cpu(trb->link.control));
325		addr += sizeof(*trb);
326	}
327}
328
329void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
330{
331	xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
332			ring->dequeue,
333			(unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
334							    ring->dequeue));
335	xhci_dbg(xhci, "Ring deq updated %u times\n",
336			ring->deq_updates);
337	xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
338			ring->enqueue,
339			(unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
340							    ring->enqueue));
341	xhci_dbg(xhci, "Ring enq updated %u times\n",
342			ring->enq_updates);
343}
344
345/**
346 * Debugging for an xHCI ring, which is a queue broken into multiple segments.
347 *
348 * Print out each segment in the ring.  Check that the DMA address in
349 * each link segment actually matches the segment's stored DMA address.
350 * Check that the link end bit is only set at the end of the ring.
351 * Check that the dequeue and enqueue pointers point to real data in this ring
352 * (not some other ring).
353 */
354void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
355{
356	/* FIXME: Throw an error if any segment doesn't have a Link TRB */
357	struct xhci_segment *seg;
358	struct xhci_segment *first_seg = ring->first_seg;
359	xhci_debug_segment(xhci, first_seg);
360
361	if (!ring->enq_updates && !ring->deq_updates) {
362		xhci_dbg(xhci, "  Ring has not been updated\n");
363		return;
364	}
365	for (seg = first_seg->next; seg != first_seg; seg = seg->next)
366		xhci_debug_segment(xhci, seg);
367}
368
369void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
370		unsigned int slot_id, unsigned int ep_index,
371		struct xhci_virt_ep *ep)
372{
373	int i;
374	struct xhci_ring *ring;
375
376	if (ep->ep_state & EP_HAS_STREAMS) {
377		for (i = 1; i < ep->stream_info->num_streams; i++) {
378			ring = ep->stream_info->stream_rings[i];
379			xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n",
380				slot_id, ep_index, i);
381			xhci_debug_segment(xhci, ring->deq_seg);
382		}
383	} else {
384		ring = ep->ring;
385		if (!ring)
386			return;
387		xhci_dbg(xhci, "Dev %d endpoint ring %d:\n",
388				slot_id, ep_index);
389		xhci_debug_segment(xhci, ring->deq_seg);
390	}
391}
392
393void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
394{
395	u64 addr = erst->erst_dma_addr;
396	int i;
397	struct xhci_erst_entry *entry;
398
399	for (i = 0; i < erst->num_entries; ++i) {
400		entry = &erst->entries[i];
401		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n",
402			 addr,
403			 lower_32_bits(le64_to_cpu(entry->seg_addr)),
404			 upper_32_bits(le64_to_cpu(entry->seg_addr)),
405			 le32_to_cpu(entry->seg_size),
406			 le32_to_cpu(entry->rsvd));
407		addr += sizeof(*entry);
408	}
409}
410
411void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
412{
413	u64 val;
414
415	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
416	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
417			lower_32_bits(val));
418	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
419			upper_32_bits(val));
420}
421
422/* Print the last 32 bytes for 64-byte contexts */
423static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
424{
425	int i;
426	for (i = 0; i < 4; ++i) {
427		xhci_dbg(xhci, "@%p (virt) @%08llx "
428			 "(dma) %#08llx - rsvd64[%d]\n",
429			 &ctx[4 + i], (unsigned long long)dma,
430			 ctx[4 + i], i);
431		dma += 8;
432	}
433}
434
435char *xhci_get_slot_state(struct xhci_hcd *xhci,
436		struct xhci_container_ctx *ctx)
437{
438	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
439
440	switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
441	case SLOT_STATE_ENABLED:
442		return "enabled/disabled";
443	case SLOT_STATE_DEFAULT:
444		return "default";
445	case SLOT_STATE_ADDRESSED:
446		return "addressed";
447	case SLOT_STATE_CONFIGURED:
448		return "configured";
449	default:
450		return "reserved";
451	}
452}
453
454static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
455{
456	/* Fields are 32 bits wide, DMA addresses are in bytes */
457	int field_size = 32 / 8;
458	int i;
459
460	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
461	dma_addr_t dma = ctx->dma +
462		((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
463	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
464
465	xhci_dbg(xhci, "Slot Context:\n");
466	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
467			&slot_ctx->dev_info,
468			(unsigned long long)dma, slot_ctx->dev_info);
469	dma += field_size;
470	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
471			&slot_ctx->dev_info2,
472			(unsigned long long)dma, slot_ctx->dev_info2);
473	dma += field_size;
474	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
475			&slot_ctx->tt_info,
476			(unsigned long long)dma, slot_ctx->tt_info);
477	dma += field_size;
478	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
479			&slot_ctx->dev_state,
480			(unsigned long long)dma, slot_ctx->dev_state);
481	dma += field_size;
482	for (i = 0; i < 4; ++i) {
483		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
484				&slot_ctx->reserved[i], (unsigned long long)dma,
485				slot_ctx->reserved[i], i);
486		dma += field_size;
487	}
488
489	if (csz)
490		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
491}
492
493static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
494		     struct xhci_container_ctx *ctx,
495		     unsigned int last_ep)
496{
497	int i, j;
498	int last_ep_ctx = 31;
499	/* Fields are 32 bits wide, DMA addresses are in bytes */
500	int field_size = 32 / 8;
501	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
502
503	if (last_ep < 31)
504		last_ep_ctx = last_ep + 1;
505	for (i = 0; i < last_ep_ctx; ++i) {
 
506		struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
507		dma_addr_t dma = ctx->dma +
508			((unsigned long)ep_ctx - (unsigned long)ctx->bytes);
509
510		xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
 
 
511		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
512				&ep_ctx->ep_info,
513				(unsigned long long)dma, ep_ctx->ep_info);
514		dma += field_size;
515		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
516				&ep_ctx->ep_info2,
517				(unsigned long long)dma, ep_ctx->ep_info2);
518		dma += field_size;
519		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
520				&ep_ctx->deq,
521				(unsigned long long)dma, ep_ctx->deq);
522		dma += 2*field_size;
523		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
524				&ep_ctx->tx_info,
525				(unsigned long long)dma, ep_ctx->tx_info);
526		dma += field_size;
527		for (j = 0; j < 3; ++j) {
528			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
529					&ep_ctx->reserved[j],
530					(unsigned long long)dma,
531					ep_ctx->reserved[j], j);
532			dma += field_size;
533		}
534
535		if (csz)
536			dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
537	}
538}
539
540void xhci_dbg_ctx(struct xhci_hcd *xhci,
541		  struct xhci_container_ctx *ctx,
542		  unsigned int last_ep)
543{
544	int i;
545	/* Fields are 32 bits wide, DMA addresses are in bytes */
546	int field_size = 32 / 8;
547	struct xhci_slot_ctx *slot_ctx;
548	dma_addr_t dma = ctx->dma;
549	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
550
551	if (ctx->type == XHCI_CTX_TYPE_INPUT) {
552		struct xhci_input_control_ctx *ctrl_ctx =
553			xhci_get_input_control_ctx(xhci, ctx);
 
 
 
 
 
554		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
555			 &ctrl_ctx->drop_flags, (unsigned long long)dma,
556			 ctrl_ctx->drop_flags);
557		dma += field_size;
558		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
559			 &ctrl_ctx->add_flags, (unsigned long long)dma,
560			 ctrl_ctx->add_flags);
561		dma += field_size;
562		for (i = 0; i < 6; ++i) {
563			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
564				 &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
565				 ctrl_ctx->rsvd2[i], i);
566			dma += field_size;
567		}
568
569		if (csz)
570			dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
571	}
572
573	slot_ctx = xhci_get_slot_ctx(xhci, ctx);
574	xhci_dbg_slot_ctx(xhci, ctx);
575	xhci_dbg_ep_ctx(xhci, ctx, last_ep);
576}
--- Version v4.10.11 (second listing of the same file, drivers/usb/host/xhci-dbg.c) ---
  1/*
  2 * xHCI host controller driver
  3 *
  4 * Copyright (C) 2008 Intel Corp.
  5 *
  6 * Author: Sarah Sharp
  7 * Some code borrowed from the Linux EHCI driver.
  8 *
  9 * This program is free software; you can redistribute it and/or modify
 10 * it under the terms of the GNU General Public License version 2 as
 11 * published by the Free Software Foundation.
 12 *
 13 * This program is distributed in the hope that it will be useful, but
 14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 15 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 16 * for more details.
 17 *
 18 * You should have received a copy of the GNU General Public License
 19 * along with this program; if not, write to the Free Software Foundation,
 20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 21 */
 22
 23#include "xhci.h"
 24
 25#define XHCI_INIT_VALUE 0x0
 26
 27/* Add verbose debugging later, just print everything for now */
 28
 29void xhci_dbg_regs(struct xhci_hcd *xhci)
 30{
 31	u32 temp;
 32
 33	xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
 34			xhci->cap_regs);
 35	temp = readl(&xhci->cap_regs->hc_capbase);
 36	xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
 37			&xhci->cap_regs->hc_capbase, temp);
 38	xhci_dbg(xhci, "//   CAPLENGTH: 0x%x\n",
 39			(unsigned int) HC_LENGTH(temp));
 40#if 0
 41	xhci_dbg(xhci, "//   HCIVERSION: 0x%x\n",
 42			(unsigned int) HC_VERSION(temp));
 43#endif
 44
 45	xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
 46
 47	temp = readl(&xhci->cap_regs->run_regs_off);
 48	xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
 49			&xhci->cap_regs->run_regs_off,
 50			(unsigned int) temp & RTSOFF_MASK);
 51	xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
 52
 53	temp = readl(&xhci->cap_regs->db_off);
 54	xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
 55	xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
 56}
 57
 58static void xhci_print_cap_regs(struct xhci_hcd *xhci)
 59{
 60	u32 temp;
 61	u32 hci_version;
 62
 63	xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
 64
 65	temp = readl(&xhci->cap_regs->hc_capbase);
 66	hci_version = HC_VERSION(temp);
 67	xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
 68			(unsigned int) temp);
 69	xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
 70			(unsigned int) HC_LENGTH(temp));
 71	xhci_dbg(xhci, "HCIVERSION: 0x%x\n", hci_version);
 
 72
 73	temp = readl(&xhci->cap_regs->hcs_params1);
 74	xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
 75			(unsigned int) temp);
 76	xhci_dbg(xhci, "  Max device slots: %u\n",
 77			(unsigned int) HCS_MAX_SLOTS(temp));
 78	xhci_dbg(xhci, "  Max interrupters: %u\n",
 79			(unsigned int) HCS_MAX_INTRS(temp));
 80	xhci_dbg(xhci, "  Max ports: %u\n",
 81			(unsigned int) HCS_MAX_PORTS(temp));
 82
 83	temp = readl(&xhci->cap_regs->hcs_params2);
 84	xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
 85			(unsigned int) temp);
 86	xhci_dbg(xhci, "  Isoc scheduling threshold: %u\n",
 87			(unsigned int) HCS_IST(temp));
 88	xhci_dbg(xhci, "  Maximum allowed segments in event ring: %u\n",
 89			(unsigned int) HCS_ERST_MAX(temp));
 90
 91	temp = readl(&xhci->cap_regs->hcs_params3);
 92	xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
 93			(unsigned int) temp);
 94	xhci_dbg(xhci, "  Worst case U1 device exit latency: %u\n",
 95			(unsigned int) HCS_U1_LATENCY(temp));
 96	xhci_dbg(xhci, "  Worst case U2 device exit latency: %u\n",
 97			(unsigned int) HCS_U2_LATENCY(temp));
 98
 99	temp = readl(&xhci->cap_regs->hcc_params);
100	xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
101	xhci_dbg(xhci, "  HC generates %s bit addresses\n",
102			HCC_64BIT_ADDR(temp) ? "64" : "32");
103	xhci_dbg(xhci, "  HC %s Contiguous Frame ID Capability\n",
104			HCC_CFC(temp) ? "has" : "hasn't");
105	xhci_dbg(xhci, "  HC %s generate Stopped - Short Package event\n",
106			HCC_SPC(temp) ? "can" : "can't");
107	/* FIXME */
108	xhci_dbg(xhci, "  FIXME: more HCCPARAMS debugging\n");
109
110	temp = readl(&xhci->cap_regs->run_regs_off);
111	xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
112
113	/* xhci 1.1 controllers have the HCCPARAMS2 register */
114	if (hci_version > 0x100) {
115		temp = readl(&xhci->cap_regs->hcc_params2);
116		xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
117		xhci_dbg(xhci, "  HC %s Force save context capability",
118			 HCC2_FSC(temp) ? "supports" : "doesn't support");
119		xhci_dbg(xhci, "  HC %s Large ESIT Payload Capability",
120			 HCC2_LEC(temp) ? "supports" : "doesn't support");
121		xhci_dbg(xhci, "  HC %s Extended TBC capability",
122			 HCC2_ETC(temp) ? "supports" : "doesn't support");
123	}
124}
125
126static void xhci_print_command_reg(struct xhci_hcd *xhci)
127{
128	u32 temp;
129
130	temp = readl(&xhci->op_regs->command);
131	xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
132	xhci_dbg(xhci, "  HC is %s\n",
133			(temp & CMD_RUN) ? "running" : "being stopped");
134	xhci_dbg(xhci, "  HC has %sfinished hard reset\n",
135			(temp & CMD_RESET) ? "not " : "");
136	xhci_dbg(xhci, "  Event Interrupts %s\n",
137			(temp & CMD_EIE) ? "enabled " : "disabled");
138	xhci_dbg(xhci, "  Host System Error Interrupts %s\n",
139			(temp & CMD_HSEIE) ? "enabled " : "disabled");
140	xhci_dbg(xhci, "  HC has %sfinished light reset\n",
141			(temp & CMD_LRESET) ? "not " : "");
142}
143
144static void xhci_print_status(struct xhci_hcd *xhci)
145{
146	u32 temp;
147
148	temp = readl(&xhci->op_regs->status);
149	xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
150	xhci_dbg(xhci, "  Event ring is %sempty\n",
151			(temp & STS_EINT) ? "not " : "");
152	xhci_dbg(xhci, "  %sHost System Error\n",
153			(temp & STS_FATAL) ? "WARNING: " : "No ");
154	xhci_dbg(xhci, "  HC is %s\n",
155			(temp & STS_HALT) ? "halted" : "running");
156}
157
158static void xhci_print_op_regs(struct xhci_hcd *xhci)
159{
160	xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
161	xhci_print_command_reg(xhci);
162	xhci_print_status(xhci);
163}
164
165static void xhci_print_ports(struct xhci_hcd *xhci)
166{
167	__le32 __iomem *addr;
168	int i, j;
169	int ports;
170	char *names[NUM_PORT_REGS] = {
171		"status",
172		"power",
173		"link",
174		"reserved",
175	};
176
177	ports = HCS_MAX_PORTS(xhci->hcs_params1);
178	addr = &xhci->op_regs->port_status_base;
179	for (i = 0; i < ports; i++) {
180		for (j = 0; j < NUM_PORT_REGS; ++j) {
181			xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
182					addr, names[j],
183					(unsigned int) readl(addr));
184			addr++;
185		}
186	}
187}
188
189void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
190{
191	struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
192	void __iomem *addr;
193	u32 temp;
194	u64 temp_64;
195
196	addr = &ir_set->irq_pending;
197	temp = readl(addr);
198	if (temp == XHCI_INIT_VALUE)
199		return;
200
201	xhci_dbg(xhci, "  %p: ir_set[%i]\n", ir_set, set_num);
202
203	xhci_dbg(xhci, "  %p: ir_set.pending = 0x%x\n", addr,
204			(unsigned int)temp);
205
206	addr = &ir_set->irq_control;
207	temp = readl(addr);
208	xhci_dbg(xhci, "  %p: ir_set.control = 0x%x\n", addr,
209			(unsigned int)temp);
210
211	addr = &ir_set->erst_size;
212	temp = readl(addr);
213	xhci_dbg(xhci, "  %p: ir_set.erst_size = 0x%x\n", addr,
214			(unsigned int)temp);
215
216	addr = &ir_set->rsvd;
217	temp = readl(addr);
218	if (temp != XHCI_INIT_VALUE)
219		xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
220				addr, (unsigned int)temp);
221
222	addr = &ir_set->erst_base;
223	temp_64 = xhci_read_64(xhci, addr);
224	xhci_dbg(xhci, "  %p: ir_set.erst_base = @%08llx\n",
225			addr, temp_64);
226
227	addr = &ir_set->erst_dequeue;
228	temp_64 = xhci_read_64(xhci, addr);
229	xhci_dbg(xhci, "  %p: ir_set.erst_dequeue = @%08llx\n",
230			addr, temp_64);
231}
232
233void xhci_print_run_regs(struct xhci_hcd *xhci)
234{
235	u32 temp;
236	int i;
237
238	xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
239	temp = readl(&xhci->run_regs->microframe_index);
240	xhci_dbg(xhci, "  %p: Microframe index = 0x%x\n",
241			&xhci->run_regs->microframe_index,
242			(unsigned int) temp);
243	for (i = 0; i < 7; ++i) {
244		temp = readl(&xhci->run_regs->rsvd[i]);
245		if (temp != XHCI_INIT_VALUE)
246			xhci_dbg(xhci, "  WARN: %p: Rsvd[%i] = 0x%x\n",
247					&xhci->run_regs->rsvd[i],
248					i, (unsigned int) temp);
249	}
250}
251
/* Dump capability, operational, and port registers in one call. */
void xhci_print_registers(struct xhci_hcd *xhci)
{
	xhci_print_cap_regs(xhci);
	xhci_print_op_regs(xhci);
	xhci_print_ports(xhci);
}
258
259void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
260{
261	int i;
262	for (i = 0; i < 4; ++i)
263		xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
264				i*4, trb->generic.field[i]);
265}
266
267/**
268 * Debug a transfer request block (TRB).
269 */
270void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
271{
272	u64	address;
273	u32	type = le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK;
274
275	switch (type) {
276	case TRB_TYPE(TRB_LINK):
277		xhci_dbg(xhci, "Link TRB:\n");
278		xhci_print_trb_offsets(xhci, trb);
279
280		address = le64_to_cpu(trb->link.segment_ptr);
281		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
282
283		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
284			 GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target)));
285		xhci_dbg(xhci, "Cycle bit = %u\n",
286			 le32_to_cpu(trb->link.control) & TRB_CYCLE);
287		xhci_dbg(xhci, "Toggle cycle bit = %u\n",
288			 le32_to_cpu(trb->link.control) & LINK_TOGGLE);
289		xhci_dbg(xhci, "No Snoop bit = %u\n",
290			 le32_to_cpu(trb->link.control) & TRB_NO_SNOOP);
291		break;
292	case TRB_TYPE(TRB_TRANSFER):
293		address = le64_to_cpu(trb->trans_event.buffer);
294		/*
295		 * FIXME: look at flags to figure out if it's an address or if
296		 * the data is directly in the buffer field.
297		 */
298		xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
299		break;
300	case TRB_TYPE(TRB_COMPLETION):
301		address = le64_to_cpu(trb->event_cmd.cmd_trb);
302		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
303		xhci_dbg(xhci, "Completion status = %u\n",
304			 GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
305		xhci_dbg(xhci, "Flags = 0x%x\n",
306			 le32_to_cpu(trb->event_cmd.flags));
307		break;
308	default:
309		xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
310				(unsigned int) type>>10);
311		xhci_print_trb_offsets(xhci, trb);
312		break;
313	}
314}
315
316/**
317 * Debug a segment with an xHCI ring.
318 *
319 * @return The Link TRB of the segment, or NULL if there is no Link TRB
320 * (which is a bug, since all segments must have a Link TRB).
321 *
322 * Prints out all TRBs in the segment, even those after the Link TRB.
323 *
324 * XXX: should we print out TRBs that the HC owns?  As long as we don't
325 * write, that should be fine...  We shouldn't expect that the memory pointed to
326 * by the TRB is valid at all.  Do we care about ones the HC owns?  Probably,
327 * for HC debugging.
328 */
329void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
330{
331	int i;
332	u64 addr = seg->dma;
333	union xhci_trb *trb = seg->trbs;
334
335	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
336		trb = &seg->trbs[i];
337		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr,
338			 lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
339			 upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
340			 le32_to_cpu(trb->link.intr_target),
341			 le32_to_cpu(trb->link.control));
342		addr += sizeof(*trb);
343	}
344}
345
346void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
347{
348	xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
349			ring->dequeue,
350			(unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
351							    ring->dequeue));
352	xhci_dbg(xhci, "Ring deq updated %u times\n",
353			ring->deq_updates);
354	xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
355			ring->enqueue,
356			(unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
357							    ring->enqueue));
358	xhci_dbg(xhci, "Ring enq updated %u times\n",
359			ring->enq_updates);
360}
361
362/**
363 * Debugging for an xHCI ring, which is a queue broken into multiple segments.
364 *
365 * Print out each segment in the ring.  Check that the DMA address in
366 * each link segment actually matches the segment's stored DMA address.
367 * Check that the link end bit is only set at the end of the ring.
368 * Check that the dequeue and enqueue pointers point to real data in this ring
369 * (not some other ring).
370 */
371void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
372{
373	/* FIXME: Throw an error if any segment doesn't have a Link TRB */
374	struct xhci_segment *seg;
375	struct xhci_segment *first_seg = ring->first_seg;
376	xhci_debug_segment(xhci, first_seg);
377
378	if (!ring->enq_updates && !ring->deq_updates) {
379		xhci_dbg(xhci, "  Ring has not been updated\n");
380		return;
381	}
382	for (seg = first_seg->next; seg != first_seg; seg = seg->next)
383		xhci_debug_segment(xhci, seg);
384}
385
386void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
387		unsigned int slot_id, unsigned int ep_index,
388		struct xhci_virt_ep *ep)
389{
390	int i;
391	struct xhci_ring *ring;
392
393	if (ep->ep_state & EP_HAS_STREAMS) {
394		for (i = 1; i < ep->stream_info->num_streams; i++) {
395			ring = ep->stream_info->stream_rings[i];
396			xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n",
397				slot_id, ep_index, i);
398			xhci_debug_segment(xhci, ring->deq_seg);
399		}
400	} else {
401		ring = ep->ring;
402		if (!ring)
403			return;
404		xhci_dbg(xhci, "Dev %d endpoint ring %d:\n",
405				slot_id, ep_index);
406		xhci_debug_segment(xhci, ring->deq_seg);
407	}
408}
409
410void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
411{
412	u64 addr = erst->erst_dma_addr;
413	int i;
414	struct xhci_erst_entry *entry;
415
416	for (i = 0; i < erst->num_entries; ++i) {
417		entry = &erst->entries[i];
418		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n",
419			 addr,
420			 lower_32_bits(le64_to_cpu(entry->seg_addr)),
421			 upper_32_bits(le64_to_cpu(entry->seg_addr)),
422			 le32_to_cpu(entry->seg_size),
423			 le32_to_cpu(entry->rsvd));
424		addr += sizeof(*entry);
425	}
426}
427
428void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
429{
430	u64 val;
431
432	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
433	xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
434			lower_32_bits(val));
435	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
436			upper_32_bits(val));
437}
438
439/* Print the last 32 bytes for 64-byte contexts */
440static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
441{
442	int i;
443	for (i = 0; i < 4; ++i) {
444		xhci_dbg(xhci, "@%p (virt) @%08llx "
445			 "(dma) %#08llx - rsvd64[%d]\n",
446			 &ctx[4 + i], (unsigned long long)dma,
447			 ctx[4 + i], i);
448		dma += 8;
449	}
450}
451
452char *xhci_get_slot_state(struct xhci_hcd *xhci,
453		struct xhci_container_ctx *ctx)
454{
455	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
456
457	switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
458	case SLOT_STATE_ENABLED:
459		return "enabled/disabled";
460	case SLOT_STATE_DEFAULT:
461		return "default";
462	case SLOT_STATE_ADDRESSED:
463		return "addressed";
464	case SLOT_STATE_CONFIGURED:
465		return "configured";
466	default:
467		return "reserved";
468	}
469}
470
471static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
472{
473	/* Fields are 32 bits wide, DMA addresses are in bytes */
474	int field_size = 32 / 8;
475	int i;
476
477	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
478	dma_addr_t dma = ctx->dma +
479		((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
480	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
481
482	xhci_dbg(xhci, "Slot Context:\n");
483	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
484			&slot_ctx->dev_info,
485			(unsigned long long)dma, slot_ctx->dev_info);
486	dma += field_size;
487	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
488			&slot_ctx->dev_info2,
489			(unsigned long long)dma, slot_ctx->dev_info2);
490	dma += field_size;
491	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
492			&slot_ctx->tt_info,
493			(unsigned long long)dma, slot_ctx->tt_info);
494	dma += field_size;
495	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
496			&slot_ctx->dev_state,
497			(unsigned long long)dma, slot_ctx->dev_state);
498	dma += field_size;
499	for (i = 0; i < 4; ++i) {
500		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
501				&slot_ctx->reserved[i], (unsigned long long)dma,
502				slot_ctx->reserved[i], i);
503		dma += field_size;
504	}
505
506	if (csz)
507		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
508}
509
510static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
511		     struct xhci_container_ctx *ctx,
512		     unsigned int last_ep)
513{
514	int i, j;
515	int last_ep_ctx = 31;
516	/* Fields are 32 bits wide, DMA addresses are in bytes */
517	int field_size = 32 / 8;
518	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
519
520	if (last_ep < 31)
521		last_ep_ctx = last_ep + 1;
522	for (i = 0; i < last_ep_ctx; ++i) {
523		unsigned int epaddr = xhci_get_endpoint_address(i);
524		struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
525		dma_addr_t dma = ctx->dma +
526			((unsigned long)ep_ctx - (unsigned long)ctx->bytes);
527
528		xhci_dbg(xhci, "%s Endpoint %02d Context (ep_index %02d):\n",
529				usb_endpoint_out(epaddr) ? "OUT" : "IN",
530				epaddr & USB_ENDPOINT_NUMBER_MASK, i);
531		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
532				&ep_ctx->ep_info,
533				(unsigned long long)dma, ep_ctx->ep_info);
534		dma += field_size;
535		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
536				&ep_ctx->ep_info2,
537				(unsigned long long)dma, ep_ctx->ep_info2);
538		dma += field_size;
539		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
540				&ep_ctx->deq,
541				(unsigned long long)dma, ep_ctx->deq);
542		dma += 2*field_size;
543		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
544				&ep_ctx->tx_info,
545				(unsigned long long)dma, ep_ctx->tx_info);
546		dma += field_size;
547		for (j = 0; j < 3; ++j) {
548			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
549					&ep_ctx->reserved[j],
550					(unsigned long long)dma,
551					ep_ctx->reserved[j], j);
552			dma += field_size;
553		}
554
555		if (csz)
556			dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
557	}
558}
559
560void xhci_dbg_ctx(struct xhci_hcd *xhci,
561		  struct xhci_container_ctx *ctx,
562		  unsigned int last_ep)
563{
564	int i;
565	/* Fields are 32 bits wide, DMA addresses are in bytes */
566	int field_size = 32 / 8;
 
567	dma_addr_t dma = ctx->dma;
568	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
569
570	if (ctx->type == XHCI_CTX_TYPE_INPUT) {
571		struct xhci_input_control_ctx *ctrl_ctx =
572			xhci_get_input_control_ctx(ctx);
573		if (!ctrl_ctx) {
574			xhci_warn(xhci, "Could not get input context, bad type.\n");
575			return;
576		}
577
578		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
579			 &ctrl_ctx->drop_flags, (unsigned long long)dma,
580			 ctrl_ctx->drop_flags);
581		dma += field_size;
582		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
583			 &ctrl_ctx->add_flags, (unsigned long long)dma,
584			 ctrl_ctx->add_flags);
585		dma += field_size;
586		for (i = 0; i < 6; ++i) {
587			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
588				 &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
589				 ctrl_ctx->rsvd2[i], i);
590			dma += field_size;
591		}
592
593		if (csz)
594			dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
595	}
596
 
597	xhci_dbg_slot_ctx(xhci, ctx);
598	xhci_dbg_ep_ctx(xhci, ctx, last_ep);
599}
600
/*
 * xhci_dbg_trace - emit one formatted message both to the kernel log (via
 * xhci_dbg()'s "%pV" vsprintf extension) and to the given trace callback,
 * so dynamic debug output and tracepoints stay in sync.
 *
 * @xhci:  host controller to tag the message with
 * @trace: tracepoint helper that consumes the same struct va_format
 * @fmt:   printf-style format for the variadic arguments
 *
 * NOTE(review): vaf.va points at a single va_list that is consumed twice —
 * once by xhci_dbg() and once by trace().  Each consumer must va_copy()
 * before walking the arguments for this to be safe on all ABIs; confirm
 * that the %pV handler and the trace helpers passed in by callers do so.
 */
void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	xhci_dbg(xhci, "%pV\n", &vaf);
	trace(&vaf);
	va_end(args);
}
EXPORT_SYMBOL_GPL(xhci_dbg_trace);