/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static inline void *
dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	void		*vaddr;

	vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
				   size, dma_handle, flags);
	/* Don't dereference a failed allocation: */
	if (vaddr)
		memset(vaddr, 0, size);
	return vaddr;
}

static inline void
dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
		      void *cpu_addr, dma_addr_t dma_handle)
{
	if (cpu_addr)
		dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
				  size, cpu_addr, dma_handle);
}

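/*
 * Write the DbC string descriptors (string0, manufacturer, product and
 * serial) into the pre-allocated string table. The returned value packs
 * the four descriptor lengths one byte each, matching the string-length
 * fields of the DbC info context.
 */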
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor	*s_desc;
	u32				string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		= s_desc->bLength;
	string_length		<<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* String0 descriptor (language ID 0x0409, US English): */
	strings->string0[0]	= 4;
	strings->string0[1]	= USB_DT_STRING;
	strings->string0[2]	= 0x09;
	strings->string0[3]	= 0x04;
	string_length		+= 4;

	return string_length;
}

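/*
 * Initialize the DbC contexts: the info context points at the string
 * table, and the two endpoint contexts describe the bulk OUT/IN transfer
 * rings. Finally program the context pointer and device descriptor
 * registers of the debug capability.
 */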
static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
{
	struct xhci_dbc		*dbc;
	struct dbc_info_context	*info;
	struct xhci_ep_ctx	*ep_ctx;
	u32			dev_info;
	dma_addr_t		deq, dma;
	unsigned int		max_burst;

	dbc = xhci->dbc;
	if (!dbc)
		return;

	/* Populate info Context: */
	info			= (struct dbc_info_context *)dbc->ctx->bytes;
	dma			= dbc->string_dma;
	info->string0		= cpu_to_le64(dma);
	info->manufacturer	= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length		= cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx			= dbc_bulkout_ctx(dbc);
	max_burst		= DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq			= dbc_bulkout_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx			= dbc_bulkin_ctx(dbc);
	deq			= dbc_bulkin_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);

	/* writel() performs the cpu-to-LE conversion itself: */
	dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
	writel(dev_info, &dbc->regs->devinfo2);
}

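/*
 * Complete a request: unmap its buffer and hand it back to the caller's
 * completion handler. dbc->lock is dropped around the callback so that
 * the handler may queue new requests.
 */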
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct dbc_ep		*dep = req->dep;
	struct xhci_dbc		*dbc = dep->dbc;
	struct xhci_hcd		*xhci = dbc->xhci;
	struct device		*dev = xhci_to_hcd(dbc->xhci)->self.sysdev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(dep));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(xhci, req);
	spin_lock(&dbc->lock);
}

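/*
 * Cancel a request that is still on the ring: overwrite its TRB with a
 * no-op (preserving the cycle bit) and complete it with -ESHUTDOWN.
 */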
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb	*trb = req->trb;

	trb->generic.field[0]	= 0;
	trb->generic.field[1]	= 0;
	trb->generic.field[2]	= 0;
	trb->generic.field[3]	&= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3]	|= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request	*req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
	struct dbc_request	*req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->dep = dep;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = dep->direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb		*trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0]	= cpu_to_le32(field1);
	trb->generic.field[1]	= cpu_to_le32(field2);
	trb->generic.field[2]	= cpu_to_le32(field3);
	trb->generic.field[3]	= cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

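/*
 * Queue a single normal TRB for the request. The TRB is written with an
 * inverted cycle bit first, and the bit is flipped only after a write
 * barrier, so the hardware cannot see a half-written TRB.
 */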
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64			addr;
	union xhci_trb		*trb;
	unsigned int		num_trbs;
	struct xhci_dbc		*dbc = dep->dbc;
	struct xhci_ring	*ring = dep->ring;
	u32			length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr	= req->dma;
	trb	= ring->enqueue;
	cycle	= ring->cycle_state;
	length	= TRB_LEN(req->length);
	control	= TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	/*
	 * 'control' is still in CPU byte order here; it is converted in
	 * xhci_dbc_queue_trb(). Start with the cycle bit inverted:
	 */
	if (cycle)
		control &= ~TRB_CYCLE;
	else
		control |= TRB_CYCLE;

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
	int			ret;
	struct device		*dev;
	struct xhci_dbc		*dbc = dep->dbc;
	struct xhci_hcd		*xhci = dbc->xhci;

	dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual		= 0;
	req->status		= -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		xhci_err(xhci, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		xhci_err(xhci, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
		 gfp_t gfp_flags)
{
	unsigned long		flags;
	struct xhci_dbc		*dbc = dep->dbc;
	int			ret = -ESHUTDOWN;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(dep, req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

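/* Set up the software state for one bulk endpoint (OUT or IN): */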
static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
{
	struct dbc_ep		*dep;
	struct xhci_dbc		*dbc = xhci->dbc;

	dep			= &dbc->eps[direction];
	dep->dbc		= dbc;
	dep->direction		= direction;
	dep->ring		= direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
{
	xhci_dbc_do_eps_init(xhci, BULK_OUT);
	xhci_dbc_do_eps_init(xhci, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	memset(dbc->eps, 0, sizeof(dbc->eps));
}

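/*
 * Allocate everything the debug capability needs: event and transfer
 * rings, the ERST, the context data structure and the string table,
 * then program the event ring registers and populate the contexts.
 */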
static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	int			ret;
	dma_addr_t		deq;
	u32			string_length;
	struct xhci_dbc		*dbc = xhci->dbc;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = xhci_alloc_erst(xhci, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dbc_dma_alloc_coherent(xhci,
					     dbc->string_size,
					     &dbc->string_dma,
					     flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);
	xhci_write_64(xhci, dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	xhci_write_64(xhci, deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(xhci, string_length);

	mmiowb();

	xhci_dbc_eps_init(xhci);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	xhci_free_erst(xhci, &dbc->erst);
erst_fail:
	xhci_ring_free(xhci, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	xhci_ring_free(xhci, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return;

	xhci_dbc_eps_exit(xhci);

	if (dbc->string) {
		dbc_dma_free_coherent(xhci,
				      dbc->string_size,
				      dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;

	xhci_free_erst(xhci, &dbc->erst);
	xhci_ring_free(xhci, dbc->ring_out);
	xhci_ring_free(xhci, dbc->ring_in);
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

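/*
 * Enable the debug capability: wait for any previous enable to clear,
 * allocate the DbC data structures, then set the DbC and port enable
 * bits and wait for the controller to acknowledge. Caller holds
 * dbc->lock.
 */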
static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
	int			ret;
	u32			ctrl;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (dbc->state == DS_DISABLED)
		return;

	writel(0, &dbc->regs->control);
	xhci_dbc_mem_cleanup(xhci);
	dbc->state = DS_DISABLED;
}

static int xhci_dbc_start(struct xhci_hcd *xhci)
{
	int			ret;
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;

	WARN_ON(!dbc);

	pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	WARN_ON(!dbc);

	cancel_delayed_work_sync(&dbc->event_work);

	if (port->registered)
		xhci_dbc_tty_unregister_device(xhci);

	spin_lock_irqsave(&dbc->lock, flags);
	xhci_do_dbc_stop(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
}

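/* Log and acknowledge DbC port status changes reported in PORTSC: */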
static void
dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
{
	u32			portsc;
	struct xhci_dbc		*dbc = xhci->dbc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		xhci_info(xhci, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		xhci_info(xhci, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		xhci_info(xhci, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		xhci_info(xhci, "DbC config error change\n");

	/* The port reset change bit is cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

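/*
 * Handle a transfer event: map the completion code to a status, find
 * the pending request whose TRB address matches the event, and give
 * it back with the number of bytes actually transferred.
 */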
static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
{
	struct dbc_ep		*dep;
	struct xhci_ring	*ring;
	int			ep_id;
	int			status;
	u32			comp_code;
	size_t			remain_length;
	struct dbc_request	*req = NULL, *r;

	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep		= (ep_id == EPID_OUT) ?
				get_out_ep(xhci) : get_in_ep(xhci);
	ring		= dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
	/* FALLTHROUGH */
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		xhci_warn(xhci, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		xhci_err(xhci, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == le64_to_cpu(event->trans_event.buffer)) {
			req = r;
			break;
		}
	}

	if (!req) {
		xhci_warn(xhci, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

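/*
 * Advance the DbC state machine based on the port and control registers,
 * then drain any pending events from the event ring. The returned
 * evtreturn code tells the caller whether to register or unregister the
 * tty device, keep polling, or stop.
 */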
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t		deq;
	struct dbc_ep		*dep;
	union xhci_trb		*evt;
	u32			ctrl, portsc;
	struct xhci_hcd		*xhci = dbc->xhci;
	bool			update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			xhci_info(xhci, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			xhci_info(xhci, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			xhci_info(xhci, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			xhci_info(xhci, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			xhci_info(xhci, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(xhci);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(xhci);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		xhci_err(xhci, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(xhci, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(xhci, evt);
			break;
		default:
			break;
		}

		inc_deq(xhci, dbc->ring_evt);
		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		xhci_write_64(xhci, deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

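/*
 * The DbC has no interrupt of its own in this driver, so its event ring
 * is polled from a delayed work item, re-armed after each pass:
 */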
static void xhci_dbc_handle_events(struct work_struct *work)
{
	int			ret;
	enum evtreturn		evtr;
	struct xhci_dbc		*dbc;
	unsigned long		flags;
	struct xhci_hcd		*xhci;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	xhci = dbc->xhci;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		ret = xhci_dbc_tty_register_device(xhci);
		if (ret) {
			xhci_err(xhci, "failed to alloc tty device\n");
			break;
		}

		xhci_info(xhci, "DbC now attached to /dev/ttyDBC0\n");
		break;
	case EVT_DISC:
		xhci_dbc_tty_unregister_device(xhci);
		break;
	case EVT_DONE:
		break;
	default:
		xhci_info(xhci, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
	unsigned long		flags;

	spin_lock_irqsave(&xhci->lock, flags);
	kfree(xhci->dbc);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

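/*
 * Locate the debug capability in the extended capability list and
 * allocate the per-controller DbC structure, unless the capability is
 * already enabled or claimed:
 */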
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
	u32			reg;
	struct xhci_dbc		*dbc;
	unsigned long		flags;
	void __iomem		*base;
	int			dbc_cap_offs;

	base = &xhci->cap_regs->hc_capbase;
	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return -ENOMEM;

	dbc->regs = base + dbc_cap_offs;

	/* Don't touch the DbC if it is already in use: */
	reg = readl(&dbc->regs->control);
	if (reg & DBC_CTRL_DBC_ENABLE) {
		kfree(dbc);
		return -EBUSY;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->dbc) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(dbc);
		return -EBUSY;
	}
	xhci->dbc = dbc;
	spin_unlock_irqrestore(&xhci->lock, flags);

	dbc->xhci = xhci;
	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	return 0;
}

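/*
 * The "dbc" sysfs attribute reports the current DbC state and accepts
 * "enable"/"disable" to start or stop the debug capability:
 */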
static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char		*p;
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:
		p = "disabled";
		break;
	case DS_INITIALIZED:
		p = "initialized";
		break;
	case DS_ENABLED:
		p = "enabled";
		break;
	case DS_CONNECTED:
		p = "connected";
		break;
	case DS_CONFIGURED:
		p = "configured";
		break;
	case DS_STALLED:
		p = "stalled";
		break;
	default:
		p = "unknown";
	}

	return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(xhci);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(xhci);
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(dbc);

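/* Driver entry points, called from the xhci core during host setup and
 * teardown:
 */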
int xhci_dbc_init(struct xhci_hcd *xhci)
{
	int			ret;
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	ret = xhci_do_dbc_init(xhci);
	if (ret)
		goto init_err3;

	ret = xhci_dbc_tty_register_driver(xhci);
	if (ret)
		goto init_err2;

	ret = device_create_file(dev, &dev_attr_dbc);
	if (ret)
		goto init_err1;

	return 0;

init_err1:
	xhci_dbc_tty_unregister_driver();
init_err2:
	xhci_do_dbc_exit(xhci);
init_err3:
	return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	if (!xhci->dbc)
		return;

	device_remove_file(dev, &dev_attr_dbc);
	xhci_dbc_tty_unregister_driver();
	xhci_dbc_stop(xhci);
	xhci_do_dbc_exit(xhci);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(xhci);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int			ret = 0;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(xhci);
	}

	return ret;
}
#endif /* CONFIG_PM */