// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/bug.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kstrtox.h>
#include <linux/list.h>
#include <linux/nls.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <linux/io-64-nonatomic-lo-hi.h>

#include <asm/byteorder.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
	kfree(ctx);
}

/* we use only one segment for DbC rings */
static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg) {
		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
				  ring->first_seg->trbs,
				  ring->first_seg->dma);
		kfree(ring->first_seg);
	}
	kfree(ring);
}

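/*
 * Write the UTF-16LE serial, product and manufacturer string descriptors
 * plus the language ID descriptor (string0) into the string table, and
 * return the four descriptor lengths packed one per byte, string0 in the
 * low byte, for the info context length field.
 */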
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor	*s_desc;
	u32				string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		= s_desc->bLength;
	string_length		<<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* String0 (language ID descriptor, 0x0409 = US English): */
	strings->string0[0]	= 4;
	strings->string0[1]	= USB_DT_STRING;
	strings->string0[2]	= 0x09;
	strings->string0[3]	= 0x04;
	string_length		+= 4;

	return string_length;
}

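/*
 * Fill in the DbC info context (string descriptor addresses and lengths)
 * and the two bulk endpoint contexts, then point the hardware at them via
 * the DCCP register and program the devinfo registers with the VID/PID,
 * device revision and interface protocol.
 */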
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context	*info;
	struct xhci_ep_ctx	*ep_ctx;
	u32			dev_info;
	dma_addr_t		deq, dma;
	unsigned int		max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info			= (struct dbc_info_context *)dbc->ctx->bytes;
	dma			= dbc->string_dma;
	info->string0		= cpu_to_le64(dma);
	info->manufacturer	= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length		= cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx			= dbc_bulkout_ctx(dbc);
	max_burst		= DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq			= dbc_bulkout_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx			= dbc_bulkin_ctx(dbc);
	deq			= dbc_bulkin_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
	writel(dev_info, &dbc->regs->devinfo2);
}

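/*
 * Complete a request: unlink it, unmap its buffer, and invoke its
 * ->complete() callback with dbc->lock temporarily dropped, as the
 * sparse annotations below document.
 */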
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc		*dbc = req->dbc;
	struct device		*dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}

static void trb_to_noop(union xhci_trb *trb)
{
	trb->generic.field[0]	= 0;
	trb->generic.field[1]	= 0;
	trb->generic.field[2]	= 0;
	trb->generic.field[3]	&= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3]	|= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	trb_to_noop(req->trb);
	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request	*req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
	struct dbc_request	*req;

	if (direction != BULK_IN &&
	    direction != BULK_OUT)
		return NULL;

	if (!dbc)
		return NULL;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	req->dbc = dbc;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

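/*
 * Write one TRB at the ring's enqueue pointer and advance it. When the
 * next TRB is the link TRB, toggle its cycle bit, wrap the enqueue
 * pointer back to the start of the segment, and flip the ring's cycle
 * state (DbC rings are single-segment).
 */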
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb		*trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0]	= cpu_to_le32(field1);
	trb->generic.field[1]	= cpu_to_le32(field2);
	trb->generic.field[2]	= cpu_to_le32(field3);
	trb->generic.field[3]	= cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic,
				       xhci_trb_virt_to_dma(ring->enq_seg,
							    ring->enqueue));
	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

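/*
 * Queue a single Normal TRB for the request. The TRB is first written
 * with the cycle bit in the "not owned by hardware" state; only after
 * the write barrier below is the cycle bit flipped to hand the TRB to
 * the controller, and the doorbell rung.
 */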
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64			addr;
	union xhci_trb		*trb;
	unsigned int		num_trbs;
	struct xhci_dbc		*dbc = req->dbc;
	struct xhci_ring	*ring = dep->ring;
	u32			length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr	= req->dma;
	trb	= ring->enqueue;
	cycle	= ring->cycle_state;
	length	= TRB_LEN(req->length);
	control	= TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

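/*
 * Map the request buffer for DMA and queue it on the endpoint's ring;
 * on success the request is added to the endpoint's pending list.
 * Called with dbc->lock held.
 */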
static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int			ret;
	struct xhci_dbc		*dbc = req->dbc;
	struct device		*dev = dbc->dev;
	struct dbc_ep		*dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual		= 0;
	req->status		= -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

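/*
 * Queue a request on a DbC endpoint. Requests are only accepted while
 * the DbC is in the configured state; the event work is kicked so the
 * completion is picked up promptly.
 */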
int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long		flags;
	struct xhci_dbc		*dbc = req->dbc;
	int			ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep		*dep;

	dep			= &dbc->eps[direction];
	dep->dbc		= dbc;
	dep->direction		= direction;
	dep->ring		= direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof_field(struct xhci_dbc, eps));
}

static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(*erst->entries),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
	return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	dma_free_coherent(dev, sizeof(*erst->entries), erst->entries,
			  erst->erst_dma_addr);
	erst->entries = NULL;
}

static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	/* xHCI 7.6.9: all three contexts (info, ep-out and ep-in), each 64 bytes */
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

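/*
 * Allocate a single-segment ring. Transfer rings get a link TRB in the
 * last slot that points back to the start of the same segment with the
 * toggle-cycle bit set; the event ring has no link TRB and is wrapped
 * manually by the dequeue logic.
 */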
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;
	dma_addr_t dma;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = 1;
	ring->type = type;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		goto seg_fail;

	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
	xhci_initialize_ring_info(ring);
	return ring;
dma_fail:
	kfree(seg);
seg_fail:
	kfree(ring);
	return NULL;
}

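/*
 * Allocate all DbC data structures (event and transfer rings, ERST,
 * context and string table), program the event ring registers, and
 * populate the contexts. On failure everything allocated so far is
 * unwound in reverse order.
 */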
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int			ret;
	dma_addr_t		deq;
	u32			string_length;
	struct device		*dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(*dbc->string);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.num_entries, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	dma_free_coherent(dbc->dev, dbc->string_size, dbc->string, dbc->string_dma);
	dbc->string = NULL;

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

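/*
 * Enable the DbC: first wait for any previous enable to clear, then
 * allocate and initialize the DbC data structures and set the enable
 * and port-enable bits. Called with dbc->lock held, hence GFP_ATOMIC.
 */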
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int			ret;
	u32			ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}

static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int			ret;
	unsigned long		flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work,
				msecs_to_jiffies(dbc->poll_interval));
}

static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int			ret;
	unsigned long		flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);
	if (ret)
		return;

	xhci_dbc_mem_cleanup(dbc);
	pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
}

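/*
 * Track endpoint halt state. When a halt is cleared and requests are
 * still pending, ring the doorbell so the controller resumes processing
 * the endpoint's ring.
 */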
static void
handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
{
	if (halted) {
		dev_info(dbc->dev, "DbC Endpoint halted\n");
		dep->halted = 1;

	} else if (dep->halted) {
		dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
		dep->halted = 0;

		if (!list_empty(&dep->list_pending))
			writel(DBC_DOOR_BELL_TARGET(dep->direction),
			       &dbc->regs->doorbell);
	}
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32			portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* The port reset change bit is cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

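/*
 * Handle a transfer event: find the pending request whose TRB matches
 * the event's buffer pointer, translate the completion code into a
 * status, and give the request back. Stall events need special care,
 * see the comment in the COMP_STALL_ERROR case below.
 */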
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep		*dep;
	struct xhci_ring	*ring;
	int			ep_id;
	int			status;
	struct xhci_ep_ctx	*ep_ctx;
	u32			comp_code;
	size_t			remain_length;
	struct dbc_request	*req = NULL, *r;

	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep		= (ep_id == EPID_OUT) ?
				get_out_ep(dbc) : get_in_ep(dbc);
	ep_ctx		= (ep_id == EPID_OUT) ?
				dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
	ring		= dep->ring;

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
		if (r->status == -COMP_STALL_ERROR) {
			dev_warn(dbc->dev, "Give back stale stalled req\n");
			ring->num_trbs_free++;
			xhci_dbc_giveback(r, 0);
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic, req->trb_dma);

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
			 event->trans_event.buffer, remain_length, ep_ctx->deq);
		status = 0;
		dep->halted = 1;

		/*
		 * xHC DbC may trigger a STALL bulk xfer event when the host
		 * sends a ClearFeature(ENDPOINT_HALT) request even if there
		 * wasn't an active bulk transfer.
		 *
		 * Don't give back this transfer request as hardware will later
		 * start processing TRBs starting from this 'STALLED' TRB,
		 * causing TRBs and requests to be out of sync.
		 *
		 * If the STALL event shows some bytes were transferred then
		 * assume it's an actual transfer issue and give back the
		 * request. In this case mark the TRB as No-Op to prevent hw
		 * from using the TRB again.
		 */

		if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
			dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
			if (remain_length == req->length) {
				dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
				req->status = -COMP_STALL_ERROR;
				req->actual = 0;
				return;
			}
			dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
			trb_to_noop(req->trb);
		}
		break;

	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
}

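/*
 * Run the DbC state machine (enable/connect/configure/unplug detection
 * based on the portsc and control registers), then drain the event ring
 * and write back the new dequeue pointer. Called with dbc->lock held.
 */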
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t		deq;
	union xhci_trb		*evt;
	u32			ctrl, portsc;
	bool			update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Check and handle changes in endpoint halt status */
		ctrl = readl(&dbc->regs->control);
		handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
		handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}
		break;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic,
					    xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
								 dbc->ring_evt->dequeue));

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

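/*
 * Event work: the driver polls for DbC events from delayed work rather
 * than an interrupt. Process pending events, invoke the function
 * driver's configure/disconnect callbacks outside the lock, and re-arm
 * the work with a faster poll rate while transfers are pending.
 */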
static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn		evtr;
	struct xhci_dbc		*dbc;
	unsigned long		flags;
	unsigned int		poll_interval;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	poll_interval = dbc->poll_interval;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		/* set fast poll rate if there are pending data transfers */
		if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
		    !list_empty(&dbc->eps[BULK_IN].list_pending))
			poll_interval = 1;
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work,
			 msecs_to_jiffies(poll_interval));
}

static const char * const dbc_state_strings[DS_MAX] = {
	[DS_DISABLED] = "disabled",
	[DS_INITIALIZED] = "initialized",
	[DS_ENABLED] = "enabled",
	[DS_CONNECTED] = "connected",
	[DS_CONFIGURED] = "configured",
};

static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (dbc->state >= ARRAY_SIZE(dbc_state_strings))
		return sysfs_emit(buf, "unknown\n");

	return sysfs_emit(buf, "%s\n", dbc_state_strings[dbc->state]);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd		*xhci;
	struct xhci_dbc		*dbc;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (sysfs_streq(buf, "enable"))
		xhci_dbc_start(dbc);
	else if (sysfs_streq(buf, "disable"))
		xhci_dbc_stop(dbc);
	else
		return -EINVAL;

	return count;
}

static ssize_t dbc_idVendor_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->idVendor);
}

static ssize_t dbc_idVendor_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;
	void __iomem		*ptr;
	u16			value;
	u32			dev_info;
	int			ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idVendor = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}

static ssize_t dbc_idProduct_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->idProduct);
}

static ssize_t dbc_idProduct_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;
	void __iomem		*ptr;
	u32			dev_info;
	u16			value;
	int			ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idProduct = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu)) | value;
	writel(dev_info, ptr);
	return size;
}

static ssize_t dbc_bcdDevice_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->bcdDevice);
}

static ssize_t dbc_bcdDevice_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;
	void __iomem		*ptr;
	u32			dev_info;
	u16			value;
	int			ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bcdDevice = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}

static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%02x\n", dbc->bInterfaceProtocol);
}

static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t size)
{
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;
	void __iomem		*ptr;
	u32			dev_info;
	u8			value;
	int			ret;

	/* bInterfaceProtocol is 8 bit, but... */
	ret = kstrtou8(buf, 0, &value);
	if (ret)
		return ret;

	/* ...xhci only supports values 0 and 1 */
	if (value > 1)
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bInterfaceProtocol = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffu)) | value;
	writel(dev_info, ptr);

	return size;
}

static ssize_t dbc_poll_interval_ms_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%u\n", dbc->poll_interval);
}

static ssize_t dbc_poll_interval_ms_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t size)
{
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;
	u32			value;
	int			ret;

	ret = kstrtou32(buf, 0, &value);
	if (ret || value > DBC_POLL_INTERVAL_MAX)
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	dbc->poll_interval = value;

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	return size;
}

static DEVICE_ATTR_RW(dbc);
static DEVICE_ATTR_RW(dbc_idVendor);
static DEVICE_ATTR_RW(dbc_idProduct);
static DEVICE_ATTR_RW(dbc_bcdDevice);
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);
static DEVICE_ATTR_RW(dbc_poll_interval_ms);

static struct attribute *dbc_dev_attrs[] = {
	&dev_attr_dbc.attr,
	&dev_attr_dbc_idVendor.attr,
	&dev_attr_dbc_idProduct.attr,
	&dev_attr_dbc_bcdDevice.attr,
	&dev_attr_dbc_bInterfaceProtocol.attr,
	&dev_attr_dbc_poll_interval_ms.attr,
	NULL
};
ATTRIBUTE_GROUPS(dbc_dev);

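/*
 * Allocate and initialize a DbC instance: record the register block,
 * set the default descriptor values and create the sysfs attributes.
 * Fails if the hardware DbC is already enabled.
 */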
struct xhci_dbc *
xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
{
	struct xhci_dbc		*dbc;
	int			ret;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return NULL;

	dbc->regs = base;
	dbc->dev = dev;
	dbc->driver = driver;
	dbc->idProduct = DBC_PRODUCT_ID;
	dbc->idVendor = DBC_VENDOR_ID;
	dbc->bcdDevice = DBC_DEVICE_REV;
	dbc->bInterfaceProtocol = DBC_PROTOCOL;
	dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;

	if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
		goto err;

	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	ret = sysfs_create_groups(&dev->kobj, dbc_dev_groups);
	if (ret)
		goto err;

	return dbc;
err:
	kfree(dbc);
	return NULL;
}

/* undo what xhci_alloc_dbc() did */
void xhci_dbc_remove(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;
	/* stop hw, stop wq and call dbc->driver->disconnect() */
	xhci_dbc_stop(dbc);

	/* remove sysfs files */
	sysfs_remove_groups(&dbc->dev->kobj, dbc_dev_groups);

	kfree(dbc);
}

int xhci_create_dbc_dev(struct xhci_hcd *xhci)
{
	struct device		*dev;
	void __iomem		*base;
	int			ret;
	int			dbc_cap_offs;

	/* gather everything needed to present a DbC device */
	dev = xhci_to_hcd(xhci)->self.controller;
	base = &xhci->cap_regs->hc_capbase;

	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	/* already allocated and in use */
	if (xhci->dbc)
		return -EBUSY;

	ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);

	return ret;
}

void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
{
	unsigned long		flags;

	if (!xhci->dbc)
		return;

	xhci_dbc_tty_remove(xhci->dbc);
	spin_lock_irqsave(&xhci->lock, flags);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(dbc);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int			ret = 0;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(dbc);
	}

	return ret;
}
#endif /* CONFIG_PM */

int xhci_dbc_init(void)
{
	return dbc_tty_init();
}

void xhci_dbc_exit(void)
{
	dbc_tty_exit();
}