v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2015 MediaTek Inc.
   4 * Author:
   5 *  Zhigang.Wei <zhigang.wei@mediatek.com>
   6 *  Chunfeng.Yun <chunfeng.yun@mediatek.com>
   7 */
   8
   9#include <linux/kernel.h>
  10#include <linux/module.h>
  11#include <linux/slab.h>
  12
  13#include "xhci.h"
  14#include "xhci-mtk.h"
  15
  16#define SSP_BW_BOUNDARY	130000
  17#define SS_BW_BOUNDARY	51000
  18/* table 5-5. High-speed Isoc Transaction Limits in usb_20 spec */
  19#define HS_BW_BOUNDARY	6144
  20/* usb2 spec section11.18.1: at most 188 FS bytes per microframe */
  21#define FS_PAYLOAD_MAX 188
  22#define LS_PAYLOAD_MAX 18
  23/* section 11.18.1, per fs frame */
  24#define FS_BW_BOUNDARY	1157
  25#define LS_BW_BOUNDARY	144
  26
  27/*
  28 * max number of microframes for split transfer, assume extra-cs budget is 0
  29 * for fs isoc in : 1 ss + 1 idle + 6 cs (roundup(1023/188))
  30 */
  31#define TT_MICROFRAMES_MAX	8
  32/* offset from SS for fs/ls isoc/intr ep (ss + idle) */
  33#define CS_OFFSET	2
  34
  35#define DBG_BUF_EN	64
  36
  37/* schedule error type */
  38#define ESCH_SS_Y6		1001
  39#define ESCH_SS_OVERLAP		1002
  40#define ESCH_CS_OVERFLOW	1003
  41#define ESCH_BW_OVERFLOW	1004
  42#define ESCH_FIXME		1005
  43
  44/* mtk scheduler bitmasks */
  45#define EP_BPKTS(p)	((p) & 0x7f)
  46#define EP_BCSCOUNT(p)	(((p) & 0x7) << 8)
  47#define EP_BBM(p)	((p) << 11)
  48#define EP_BOFFSET(p)	((p) & 0x3fff)
  49#define EP_BREPEAT(p)	(((p) & 0x7fff) << 16)
  50
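/*
 * Editor's sketch (illustrative only, not part of the upstream file): how the
 * bitmask macros above pack the MTK scheduler parameters into the two
 * reserved dwords of an endpoint context, mirroring what
 * xhci_mtk_check_bandwidth() does further down; the numeric values here are
 * hypothetical.
 */
static void __maybe_unused example_pack_sch_params(struct xhci_ep_ctx *ep_ctx)
{
	/* 1 packet per microframe, 3 complete-splits, burst mode off */
	ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(1) | EP_BCSCOUNT(3) | EP_BBM(0));
	/* start at microframe offset 2 of the ESIT window, no repeat */
	ep_ctx->reserved[1] = cpu_to_le32(EP_BOFFSET(2) | EP_BREPEAT(0));
}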
  51static char *sch_error_string(int err_num)
  52{
  53	switch (err_num) {
  54	case ESCH_SS_Y6:
  55		return "Can't schedule Start-Split in Y6";
  56	case ESCH_SS_OVERLAP:
  57		return "Can't find a suitable Start-Split location";
  58	case ESCH_CS_OVERFLOW:
  59		return "The last Complete-Split is greater than 7";
  60	case ESCH_BW_OVERFLOW:
  61		return "Bandwidth exceeds the maximum limit";
  62	case ESCH_FIXME:
  63		return "FIXME, to be resolved";
  64	default:
  65		return "Unknown";
  66	}
  67}
  68
  69static int is_fs_or_ls(enum usb_device_speed speed)
  70{
  71	return speed == USB_SPEED_FULL || speed == USB_SPEED_LOW;
  72}
  73
  74static const char *
  75decode_ep(struct usb_host_endpoint *ep, enum usb_device_speed speed)
  76{
  77	static char buf[DBG_BUF_EN];
  78	struct usb_endpoint_descriptor *epd = &ep->desc;
  79	unsigned int interval;
  80	const char *unit;
  81
  82	interval = usb_decode_interval(epd, speed);
  83	if (interval % 1000) {
  84		unit = "us";
  85	} else {
  86		unit = "ms";
  87		interval /= 1000;
  88	}
  89
  90	snprintf(buf, DBG_BUF_EN, "%s ep%d%s %s, mpkt:%d, interval:%d/%d%s",
  91		 usb_speed_string(speed), usb_endpoint_num(epd),
  92		 usb_endpoint_dir_in(epd) ? "in" : "out",
  93		 usb_ep_type_string(usb_endpoint_type(epd)),
  94		 usb_endpoint_maxp(epd), epd->bInterval, interval, unit);
  95
  96	return buf;
  97}
  98
  99static u32 get_bw_boundary(enum usb_device_speed speed)
 100{
 101	u32 boundary;
 102
 103	switch (speed) {
 104	case USB_SPEED_SUPER_PLUS:
 105		boundary = SSP_BW_BOUNDARY;
 106		break;
 107	case USB_SPEED_SUPER:
 108		boundary = SS_BW_BOUNDARY;
 109		break;
 110	default:
 111		boundary = HS_BW_BOUNDARY;
 112		break;
 113	}
 114
 115	return boundary;
 116}
 117
 118/*
 119* get the bandwidth domain which @ep belongs to.
 120*
 121* the bandwidth domain array is saved to @sch_array of struct xhci_hcd_mtk,
 122* each HS root port is treated as a single bandwidth domain,
 123* but each SS root port is treated as two bandwidth domains, one for IN eps,
 124* one for OUT eps.
  125* @real_port value is defined as follows according to xHCI spec:
  126* 1 for SSport0, ..., N+1 for SSportN, N+2 for HSport0, N+3 for HSport1, etc
  127* so the bandwidth domain array is organized as follows for simplification:
 128* SSport0-OUT, SSport0-IN, ..., SSportX-OUT, SSportX-IN, HSport0, ..., HSportY
 129*/
 130static struct mu3h_sch_bw_info *
 131get_bw_info(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
 132	    struct usb_host_endpoint *ep)
 133{
 134	struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
 135	struct xhci_virt_device *virt_dev;
 136	int bw_index;
 137
 138	virt_dev = xhci->devs[udev->slot_id];
 139	if (!virt_dev->real_port) {
 140		WARN_ONCE(1, "%s invalid real_port\n", dev_name(&udev->dev));
 141		return NULL;
 142	}
 143
 144	if (udev->speed >= USB_SPEED_SUPER) {
 145		if (usb_endpoint_dir_out(&ep->desc))
 146			bw_index = (virt_dev->real_port - 1) * 2;
 147		else
 148			bw_index = (virt_dev->real_port - 1) * 2 + 1;
 149	} else {
 150		/* add one more for each SS port */
 151		bw_index = virt_dev->real_port + xhci->usb3_rhub.num_ports - 1;
 152	}
 153
 154	return &mtk->sch_array[bw_index];
 155}
 156
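/*
 * Editor's worked example (illustrative only): with two SS root ports the
 * domain array described above is indexed as
 *   SSport0-OUT = 0, SSport0-IN = 1, SSport1-OUT = 2, SSport1-IN = 3,
 *   HSport0 = 4, HSport1 = 5, ...
 * so an IN endpoint on SS real_port 2 gets bw_index = (2 - 1) * 2 + 1 = 3,
 * and any endpoint on HS real_port 3 gets bw_index = 3 + 2 - 1 = 4.
 */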
 157static u32 get_esit(struct xhci_ep_ctx *ep_ctx)
 158{
 159	u32 esit;
 160
 161	esit = 1 << CTX_TO_EP_INTERVAL(le32_to_cpu(ep_ctx->ep_info));
 162	if (esit > XHCI_MTK_MAX_ESIT)
 163		esit = XHCI_MTK_MAX_ESIT;
 164
 165	return esit;
 166}
 167
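/*
 * Editor's worked example (illustrative only): an EP_INTERVAL field of 3 in
 * the endpoint context gives esit = 1 << 3 = 8 microframes (1 ms); any value
 * larger than XHCI_MTK_MAX_ESIT is clamped to XHCI_MTK_MAX_ESIT.
 */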
 168static struct mu3h_sch_tt *find_tt(struct usb_device *udev)
 169{
 170	struct usb_tt *utt = udev->tt;
 171	struct mu3h_sch_tt *tt, **tt_index, **ptt;
 
 172	bool allocated_index = false;
 173
 174	if (!utt)
 175		return NULL;	/* Not below a TT */
 176
 177	/*
 178	 * Find/create our data structure.
 179	 * For hubs with a single TT, we get it directly.
 180	 * For hubs with multiple TTs, there's an extra level of pointers.
 181	 */
 182	tt_index = NULL;
 183	if (utt->multi) {
 184		tt_index = utt->hcpriv;
 185		if (!tt_index) {	/* Create the index array */
 186			tt_index = kcalloc(utt->hub->maxchild,
 187					sizeof(*tt_index), GFP_KERNEL);
 188			if (!tt_index)
 189				return ERR_PTR(-ENOMEM);
 190			utt->hcpriv = tt_index;
 191			allocated_index = true;
 192		}
 193		ptt = &tt_index[udev->ttport - 1];
 
 194	} else {
 
 195		ptt = (struct mu3h_sch_tt **) &utt->hcpriv;
 196	}
 197
 198	tt = *ptt;
 199	if (!tt) {	/* Create the mu3h_sch_tt */
 200		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 201		if (!tt) {
 202			if (allocated_index) {
 203				utt->hcpriv = NULL;
 204				kfree(tt_index);
 205			}
 206			return ERR_PTR(-ENOMEM);
 207		}
 208		INIT_LIST_HEAD(&tt->ep_list);
 209		*ptt = tt;
 210	}
 211
 212	return tt;
 213}
 214
 215/* Release the TT above udev, if it's not in use */
 216static void drop_tt(struct usb_device *udev)
 217{
 218	struct usb_tt *utt = udev->tt;
 219	struct mu3h_sch_tt *tt, **tt_index, **ptt;
 220	int i, cnt;
 221
 222	if (!utt || !utt->hcpriv)
 223		return;		/* Not below a TT, or never allocated */
 224
 225	cnt = 0;
 226	if (utt->multi) {
 227		tt_index = utt->hcpriv;
 228		ptt = &tt_index[udev->ttport - 1];
 229		/*  How many entries are left in tt_index? */
 230		for (i = 0; i < utt->hub->maxchild; ++i)
 231			cnt += !!tt_index[i];
 232	} else {
 233		tt_index = NULL;
 234		ptt = (struct mu3h_sch_tt **)&utt->hcpriv;
 235	}
 236
 237	tt = *ptt;
 238	if (!tt || !list_empty(&tt->ep_list))
 239		return;		/* never allocated , or still in use*/
 240
 241	*ptt = NULL;
 242	kfree(tt);
 243
 244	if (cnt == 1) {
 245		utt->hcpriv = NULL;
 246		kfree(tt_index);
 247	}
 248}
 249
 250static struct mu3h_sch_ep_info *
 251create_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
 252	      struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx)
 253{
 254	struct mu3h_sch_ep_info *sch_ep;
 255	struct mu3h_sch_bw_info *bw_info;
 256	struct mu3h_sch_tt *tt = NULL;
 257	u32 len;
 258
 259	bw_info = get_bw_info(mtk, udev, ep);
 260	if (!bw_info)
 261		return ERR_PTR(-ENODEV);
 262
 263	if (is_fs_or_ls(udev->speed))
 264		len = TT_MICROFRAMES_MAX;
 265	else if ((udev->speed >= USB_SPEED_SUPER) &&
 266		 usb_endpoint_xfer_isoc(&ep->desc))
 267		len = get_esit(ep_ctx);
 268	else
 269		len = 1;
 270
 271	sch_ep = kzalloc(struct_size(sch_ep, bw_budget_table, len), GFP_KERNEL);
 272	if (!sch_ep)
 273		return ERR_PTR(-ENOMEM);
 274
 275	if (is_fs_or_ls(udev->speed)) {
 276		tt = find_tt(udev);
 277		if (IS_ERR(tt)) {
 278			kfree(sch_ep);
 279			return ERR_PTR(-ENOMEM);
 280		}
 281	}
 282
 283	sch_ep->bw_info = bw_info;
 284	sch_ep->sch_tt = tt;
 285	sch_ep->ep = ep;
 286	sch_ep->speed = udev->speed;
 287	INIT_LIST_HEAD(&sch_ep->endpoint);
 288	INIT_LIST_HEAD(&sch_ep->tt_endpoint);
 289	INIT_HLIST_NODE(&sch_ep->hentry);
 290
 291	return sch_ep;
 292}
 293
 294static void setup_sch_info(struct xhci_ep_ctx *ep_ctx,
 295			   struct mu3h_sch_ep_info *sch_ep)
 296{
 297	u32 ep_type;
 298	u32 maxpkt;
 299	u32 max_burst;
 300	u32 mult;
 301	u32 esit_pkts;
 302	u32 max_esit_payload;
 303	u32 bw_per_microframe;
 304	u32 *bwb_table;
 305	int i;
 306
 307	bwb_table = sch_ep->bw_budget_table;
 308	ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
 309	maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
 310	max_burst = CTX_TO_MAX_BURST(le32_to_cpu(ep_ctx->ep_info2));
 311	mult = CTX_TO_EP_MULT(le32_to_cpu(ep_ctx->ep_info));
 312	max_esit_payload =
 313		(CTX_TO_MAX_ESIT_PAYLOAD_HI(
 314			le32_to_cpu(ep_ctx->ep_info)) << 16) |
 315		 CTX_TO_MAX_ESIT_PAYLOAD(le32_to_cpu(ep_ctx->tx_info));
 316
 317	sch_ep->esit = get_esit(ep_ctx);
 318	sch_ep->num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
 319	sch_ep->ep_type = ep_type;
 320	sch_ep->maxpkt = maxpkt;
 321	sch_ep->offset = 0;
 322	sch_ep->burst_mode = 0;
 323	sch_ep->repeat = 0;
 324
 325	if (sch_ep->speed == USB_SPEED_HIGH) {
 326		sch_ep->cs_count = 0;
 327
 328		/*
  329		 * usb_20 spec section 5.9
  330		 * a single microframe is enough for HS synchronous endpoints
  331		 * in an interval
 332		 */
 333		sch_ep->num_budget_microframes = 1;
 334
 335		/*
 336		 * xHCI spec section6.2.3.4
  337		 * @max_burst is the number of additional transaction
  338		 * opportunities per microframe
 339		 */
 340		sch_ep->pkts = max_burst + 1;
 341		bwb_table[0] = maxpkt * sch_ep->pkts;
 342	} else if (sch_ep->speed >= USB_SPEED_SUPER) {
 
 343		/* usb3_r1 spec section4.4.7 & 4.4.8 */
 344		sch_ep->cs_count = 0;
 345		sch_ep->burst_mode = 1;
 346		/*
 347		 * some device's (d)wBytesPerInterval is set as 0,
 348		 * then max_esit_payload is 0, so evaluate esit_pkts from
 349		 * mult and burst
 350		 */
 351		esit_pkts = DIV_ROUND_UP(max_esit_payload, maxpkt);
 352		if (esit_pkts == 0)
 353			esit_pkts = (mult + 1) * (max_burst + 1);
 354
 355		if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
 356			sch_ep->pkts = esit_pkts;
 357			sch_ep->num_budget_microframes = 1;
 358			bwb_table[0] = maxpkt * sch_ep->pkts;
 359		}
 360
 361		if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
 
 362
 363			if (sch_ep->esit == 1)
 364				sch_ep->pkts = esit_pkts;
 365			else if (esit_pkts <= sch_ep->esit)
 366				sch_ep->pkts = 1;
 367			else
 368				sch_ep->pkts = roundup_pow_of_two(esit_pkts)
 369					/ sch_ep->esit;
 370
 371			sch_ep->num_budget_microframes =
 372				DIV_ROUND_UP(esit_pkts, sch_ep->pkts);
 373
 374			sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1);
 375			bw_per_microframe = maxpkt * sch_ep->pkts;
 376
 377			for (i = 0; i < sch_ep->num_budget_microframes - 1; i++)
 378				bwb_table[i] = bw_per_microframe;
 379
 380			/* last one <= bw_per_microframe */
 381			bwb_table[i] = maxpkt * esit_pkts - i * bw_per_microframe;
 382		}
 383	} else if (is_fs_or_ls(sch_ep->speed)) {
 384		sch_ep->pkts = 1; /* at most one packet for each microframe */
 385
 386		/*
  387		 * @cs_count will be updated to add extra-cs when the TT is
  388		 * checked for INT_OUT_EP, ISOC/INT_IN_EP types;
  389		 * @maxpkt <= 1023;
 390		 */
 391		sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
 392		sch_ep->num_budget_microframes = sch_ep->cs_count;
 393
 394		/* init budget table */
 395		if (ep_type == ISOC_OUT_EP) {
 396			for (i = 0; i < sch_ep->cs_count - 1; i++)
 397				bwb_table[i] = FS_PAYLOAD_MAX;
 398
 399			bwb_table[i] = maxpkt - i * FS_PAYLOAD_MAX;
 400		} else if (ep_type == INT_OUT_EP) {
 401			/* only first one used (maxpkt <= 64), others zero */
 402			bwb_table[0] = maxpkt;
 403		} else { /* INT_IN_EP or ISOC_IN_EP */
 404			bwb_table[0] = 0; /* start split */
 405			bwb_table[1] = 0; /* idle */
 406			/*
 407			 * @cs_count will be updated according to cs position
 408			 * (add 1 or 2 extra-cs), but assume only first
 409			 * @num_budget_microframes elements will be used later,
  410			 * although in fact that is not strictly true (the extra-cs
  411			 * budget may receive some data for an IN ep);
 412			 * @cs_count is 1 for INT_IN_EP (maxpkt <= 64);
 413			 */
 414			for (i = 0; i < sch_ep->cs_count - 1; i++)
 415				bwb_table[i + CS_OFFSET] = FS_PAYLOAD_MAX;
 416
 417			bwb_table[i + CS_OFFSET] = maxpkt - i * FS_PAYLOAD_MAX;
 418			/* ss + idle */
 419			sch_ep->num_budget_microframes += CS_OFFSET;
 420		}
 421	}
 422}
 423
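/*
 * Editor's worked example for the SS isoc branch of setup_sch_info() above
 * (hypothetical values): with maxpkt = 1024, esit = 8 and max_esit_payload =
 * 46080, esit_pkts = DIV_ROUND_UP(46080, 1024) = 45, so pkts =
 * roundup_pow_of_two(45) / 8 = 8, num_budget_microframes =
 * DIV_ROUND_UP(45, 8) = 6 and bw_per_microframe = 8192; the budget table
 * becomes { 8192, 8192, 8192, 8192, 8192, 5120 }, the last entry being
 * 1024 * 45 - 5 * 8192 = 5120 <= bw_per_microframe.
 */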
 424/* Get maximum bandwidth when we schedule at offset slot. */
 425static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
 426	struct mu3h_sch_ep_info *sch_ep, u32 offset)
 427{
 
 428	u32 max_bw = 0;
 429	u32 bw;
 430	int i, j, k;
 
 431
 432	for (i = 0; i < sch_ep->num_esit; i++) {
 
 433		u32 base = offset + i * sch_ep->esit;
 434
 435		for (j = 0; j < sch_ep->num_budget_microframes; j++) {
 436			k = XHCI_MTK_BW_INDEX(base + j);
 437			bw = sch_bw->bus_bw[k] + sch_ep->bw_budget_table[j];
 438			if (bw > max_bw)
 439				max_bw = bw;
 440		}
 441	}
 442	return max_bw;
 443}
 444
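/*
 * Editor's note (illustrative only): XHCI_MTK_BW_INDEX() is defined in
 * xhci-mtk.h (not shown on this page) and maps the absolute microframe number
 * base + j onto a slot of the bus_bw[] window; get_max_bw() then returns the
 * worst-case sum, over every checked slot, of the bandwidth already booked in
 * that slot plus this endpoint's own budget for it.
 */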
 445/*
 446 * for OUT: get first SS consumed bw;
 447 * for IN: get first CS consumed bw;
 448 */
 449static u16 get_fs_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
 450{
 451	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
 452	u16 fs_bw;
 453
 454	if (sch_ep->ep_type == ISOC_OUT_EP || sch_ep->ep_type == INT_OUT_EP)
 455		fs_bw = tt->fs_bus_bw_out[XHCI_MTK_BW_INDEX(offset)];
 456	else	/* skip ss + idle */
 457		fs_bw = tt->fs_bus_bw_in[XHCI_MTK_BW_INDEX(offset + CS_OFFSET)];
 458
 459	return fs_bw;
 460}
 461
 462static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
 463	struct mu3h_sch_ep_info *sch_ep, bool used)
 464{
 
 465	u32 base;
 466	int i, j, k;
 
 467
 468	for (i = 0; i < sch_ep->num_esit; i++) {
 
 469		base = sch_ep->offset + i * sch_ep->esit;
 470		for (j = 0; j < sch_ep->num_budget_microframes; j++) {
 471			k = XHCI_MTK_BW_INDEX(base + j);
 472			if (used)
 473				sch_bw->bus_bw[k] += sch_ep->bw_budget_table[j];
 
 474			else
 475				sch_bw->bus_bw[k] -= sch_ep->bw_budget_table[j];
 
 476		}
 477	}
 478}
 479
 480static int check_ls_budget_microframes(struct mu3h_sch_ep_info *sch_ep, int offset)
 481{
 482	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
 483	int i;
 484
 485	if (sch_ep->speed != USB_SPEED_LOW)
 486		return 0;
 487
 488	if (sch_ep->ep_type == INT_OUT_EP)
 489		i = XHCI_MTK_BW_INDEX(offset);
 490	else if (sch_ep->ep_type == INT_IN_EP)
 491		i = XHCI_MTK_BW_INDEX(offset + CS_OFFSET); /* skip ss + idle */
 492	else
 493		return -EINVAL;
 494
 495	if (tt->ls_bus_bw[i] + sch_ep->maxpkt > LS_PAYLOAD_MAX)
 496		return -ESCH_BW_OVERFLOW;
 497
 498	return 0;
 499}
 500
 501static int check_fs_budget_microframes(struct mu3h_sch_ep_info *sch_ep, int offset)
 502{
 503	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
 504	u32 tmp;
 505	int i, k;
 506
 507	/*
  508	 * OUT eps transfer exactly the assigned length of data, so they
  509	 * can't be allocated more than 188 bytes;
  510	 * this is not the case for IN eps: they usually can't receive a
  511	 * full 188 bytes in a uframe, so if a uframe is not assigned the
  512	 * full 188 bytes, another ep can be added to it;
 513	 */
 514	for (i = 0; i < sch_ep->num_budget_microframes; i++) {
 515		k = XHCI_MTK_BW_INDEX(offset + i);
 516		if (sch_ep->ep_type == ISOC_OUT_EP || sch_ep->ep_type == INT_OUT_EP)
 517			tmp = tt->fs_bus_bw_out[k] + sch_ep->bw_budget_table[i];
 518		else /* ep_type : ISOC IN / INTR IN */
 519			tmp = tt->fs_bus_bw_in[k];
 520
 521		if (tmp > FS_PAYLOAD_MAX)
 522			return -ESCH_BW_OVERFLOW;
 523	}
 524
 525	return 0;
 526}
 527
 528static int check_fs_budget_frames(struct mu3h_sch_ep_info *sch_ep, int offset)
 529{
 530	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
 531	u32 head, tail;
 532	int i, j, k;
 533
  534	/* the scheduled budget may cross at most two fs frames */
 535	j = XHCI_MTK_BW_INDEX(offset) / UFRAMES_PER_FRAME;
 536	k = XHCI_MTK_BW_INDEX(offset + sch_ep->num_budget_microframes - 1) / UFRAMES_PER_FRAME;
 537
 538	if (j != k) {
 539		head = tt->fs_frame_bw[j];
 540		tail = tt->fs_frame_bw[k];
 541	} else {
 542		head = tt->fs_frame_bw[j];
 543		tail = 0;
 544	}
 545
 546	j = roundup(offset, UFRAMES_PER_FRAME);
 547	for (i = 0; i < sch_ep->num_budget_microframes; i++) {
 548		if ((offset + i) < j)
 549			head += sch_ep->bw_budget_table[i];
 550		else
 551			tail += sch_ep->bw_budget_table[i];
 552	}
 553
 554	if (head > FS_BW_BOUNDARY || tail > FS_BW_BOUNDARY)
 555		return -ESCH_BW_OVERFLOW;
 556
 557	return 0;
 558}
 559
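/*
 * Editor's worked example for check_fs_budget_frames() above (hypothetical
 * values, taking XHCI_MTK_BW_INDEX() as the identity for these small
 * offsets): a budget of 4 microframes starting at offset 6 covers uframes
 * 6, 7, 8, 9, i.e. the tail of fs frame 0 and the head of fs frame 1, so the
 * first two budget entries are added to "head" (frame 0) and the last two to
 * "tail" (frame 1); each per-frame total must stay within FS_BW_BOUNDARY
 * (1157 bytes).
 */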
 560static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
 561{
 562	int i, base;
 563	int ret = 0;
 564
 565	for (i = 0; i < sch_ep->num_esit; i++) {
 566		base = offset + i * sch_ep->esit;
 567
 568		ret = check_ls_budget_microframes(sch_ep, base);
 569		if (ret)
 570			goto err;
 571
 572		ret = check_fs_budget_microframes(sch_ep, base);
 573		if (ret)
 574			goto err;
 575
 576		ret = check_fs_budget_frames(sch_ep, base);
 577		if (ret)
 578			goto err;
 579	}
 580
 581err:
 582	return ret;
 583}
 584
 585static int check_ss_and_cs(struct mu3h_sch_ep_info *sch_ep, u32 offset)
 586{
 587	u32 start_ss, last_ss;
 588	u32 start_cs, last_cs;
 
 589
 590	start_ss = offset % UFRAMES_PER_FRAME;
 
 591
 592	if (sch_ep->ep_type == ISOC_OUT_EP) {
 593		last_ss = start_ss + sch_ep->cs_count - 1;
 594
 595		/*
 596		 * usb_20 spec section11.18:
 597		 * must never schedule Start-Split in Y6
 598		 */
 599		if (!(start_ss == 7 || last_ss < 6))
 600			return -ESCH_SS_Y6;
 601
 602	} else {
 603		/* maxpkt <= 1023, cs <= 6 */
 604		u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
 605
 606		/*
 607		 * usb_20 spec section11.18:
 608		 * must never schedule Start-Split in Y6
 609		 */
 610		if (start_ss == 6)
 611			return -ESCH_SS_Y6;
 612
 613		/* one uframe for ss + one uframe for idle */
 614		start_cs = (start_ss + CS_OFFSET) % UFRAMES_PER_FRAME;
 615		last_cs = start_cs + cs_count - 1;
 
 616		if (last_cs > 7)
 617			return -ESCH_CS_OVERFLOW;
 618
 619		/* add extra-cs */
 620		cs_count += (last_cs == 7) ? 1 : 2;
 621		if (cs_count > 7)
 622			cs_count = 7; /* HW limit */
 623
 624		sch_ep->cs_count = cs_count;
 625
 626	}
 627
 628	return 0;
 629}
 630
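/*
 * Editor's worked example for check_ss_and_cs() above (hypothetical values):
 * a FS isoc IN endpoint with maxpkt = 700 needs cs_count =
 * DIV_ROUND_UP(700, 188) = 4 complete-splits; scheduled at start_ss = 0 the
 * start-split is not in Y6, start_cs = (0 + CS_OFFSET) % 8 = 2 and
 * last_cs = 2 + 4 - 1 = 5 <= 7, so two extra complete-splits are added
 * (last_cs != 7), giving a final cs_count of 6.
 */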
 631/*
  632 * when an isoc-out transfers 188 bytes in a uframe and an isoc/intr's
  633 * ss token is sent in the same uframe, it may cause a 'bit stuff
  634 * error' on the downstream port;
  635 * when the isoc-out transfers less than 188 bytes in a uframe, the
  636 * isoc-in's ss shall be sent after the isoc-out's ss (but hw can't
  637 * ensure the sequence, so just avoid the overlap).
 638 */
 639static int check_isoc_ss_overlap(struct mu3h_sch_ep_info *sch_ep, u32 offset)
 640{
 641	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
 642	int base;
 643	int i, j, k;
 644
 645	if (!tt)
 646		return 0;
 647
 648	for (i = 0; i < sch_ep->num_esit; i++) {
 649		base = offset + i * sch_ep->esit;
 650
 651		if (sch_ep->ep_type == ISOC_OUT_EP) {
 652			for (j = 0; j < sch_ep->num_budget_microframes; j++) {
 653				k = XHCI_MTK_BW_INDEX(base + j);
 654				if (tt->in_ss_cnt[k])
 655					return -ESCH_SS_OVERLAP;
 656			}
 657		} else if (sch_ep->ep_type == ISOC_IN_EP || sch_ep->ep_type == INT_IN_EP) {
 658			k = XHCI_MTK_BW_INDEX(base);
 659			/* only check IN's ss */
 660			if (tt->fs_bus_bw_out[k])
 661				return -ESCH_SS_OVERLAP;
 662		}
 663	}
 664
 665	return 0;
 666}
 667
 668static int check_sch_tt_budget(struct mu3h_sch_ep_info *sch_ep, u32 offset)
 669{
 670	int ret;
 671
 672	ret = check_ss_and_cs(sch_ep, offset);
 673	if (ret)
 674		return ret;
 675
 676	ret = check_isoc_ss_overlap(sch_ep, offset);
 677	if (ret)
 678		return ret;
 679
 680	return check_fs_bus_bw(sch_ep, offset);
 681}
 682
 683/* allocate microframes in the ls/fs frame */
 684static int alloc_sch_portion_of_frame(struct mu3h_sch_ep_info *sch_ep)
 685{
 686	struct mu3h_sch_bw_info *sch_bw = sch_ep->bw_info;
 687	const u32 bw_boundary = get_bw_boundary(sch_ep->speed);
 688	u32 bw_max, fs_bw_min;
 689	u32 offset, offset_min;
 690	u16 fs_bw;
 691	int frames;
 692	int i, j;
 693	int ret;
 694
 695	frames = sch_ep->esit / UFRAMES_PER_FRAME;
 696
 697	for (i = 0; i < UFRAMES_PER_FRAME; i++) {
 698		fs_bw_min = FS_PAYLOAD_MAX;
 699		offset_min = XHCI_MTK_MAX_ESIT;
 700
 701		for (j = 0; j < frames; j++) {
 702			offset = (i + j * UFRAMES_PER_FRAME) % sch_ep->esit;
 703
 704			ret = check_sch_tt_budget(sch_ep, offset);
 705			if (ret)
 706				continue;
 707
 708			/* check hs bw domain */
 709			bw_max = get_max_bw(sch_bw, sch_ep, offset);
 710			if (bw_max > bw_boundary) {
 711				ret = -ESCH_BW_OVERFLOW;
 712				continue;
 713			}
 714
 715			/* use best-fit between frames */
 716			fs_bw = get_fs_bw(sch_ep, offset);
 717			if (fs_bw < fs_bw_min) {
 718				fs_bw_min = fs_bw;
 719				offset_min = offset;
 720			}
 721
 722			if (!fs_bw_min)
 723				break;
 724		}
 725
 726		/* use first-fit between microframes in a frame */
 727		if (offset_min < XHCI_MTK_MAX_ESIT)
 728			break;
 729	}
 730
 731	if (offset_min == XHCI_MTK_MAX_ESIT)
 732		return -ESCH_BW_OVERFLOW;
 733
 734	sch_ep->offset = offset_min;
 735
 736	return 0;
 737}
 738
 739static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
 
 740{
 741	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
 742	u16 *fs_bus_bw;
 743	u32 base;
 744	int i, j, k, f;
 745
 746	if (sch_ep->ep_type == ISOC_OUT_EP || sch_ep->ep_type == INT_OUT_EP)
 747		fs_bus_bw = tt->fs_bus_bw_out;
 748	else
 749		fs_bus_bw = tt->fs_bus_bw_in;
 750
 751	for (i = 0; i < sch_ep->num_esit; i++) {
 
 752		base = sch_ep->offset + i * sch_ep->esit;
 753
 754		for (j = 0; j < sch_ep->num_budget_microframes; j++) {
 755			k = XHCI_MTK_BW_INDEX(base + j);
 756			f = k / UFRAMES_PER_FRAME;
 757			if (used) {
 758				if (sch_ep->speed == USB_SPEED_LOW)
 759					tt->ls_bus_bw[k] += (u8)sch_ep->bw_budget_table[j];
 760
 761				fs_bus_bw[k] += (u16)sch_ep->bw_budget_table[j];
 762				tt->fs_frame_bw[f] += (u16)sch_ep->bw_budget_table[j];
 763			} else {
 764				if (sch_ep->speed == USB_SPEED_LOW)
 765					tt->ls_bus_bw[k] -= (u8)sch_ep->bw_budget_table[j];
 766
 767				fs_bus_bw[k] -= (u16)sch_ep->bw_budget_table[j];
 768				tt->fs_frame_bw[f] -= (u16)sch_ep->bw_budget_table[j];
 769			}
 770		}
 771
 772		if (sch_ep->ep_type == ISOC_IN_EP || sch_ep->ep_type == INT_IN_EP) {
 773			k = XHCI_MTK_BW_INDEX(base);
 774			if (used)
 775				tt->in_ss_cnt[k]++;
 776			else
 777				tt->in_ss_cnt[k]--;
 778		}
 779	}
 780
 781	if (used)
 782		list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
 783	else
 784		list_del(&sch_ep->tt_endpoint);
 785}
 786
 787static int load_ep_bw(struct mu3h_sch_bw_info *sch_bw,
 788		      struct mu3h_sch_ep_info *sch_ep, bool loaded)
 789{
 790	if (sch_ep->sch_tt)
 791		update_sch_tt(sch_ep, loaded);
 792
 793	/* update bus bandwidth info */
 794	update_bus_bw(sch_bw, sch_ep, loaded);
 795	sch_ep->allocated = loaded;
 796
 797	return 0;
 798}
 799
 800/* allocate microframes for hs/ss/ssp */
 801static int alloc_sch_microframes(struct mu3h_sch_ep_info *sch_ep)
 802{
 803	struct mu3h_sch_bw_info *sch_bw = sch_ep->bw_info;
 804	const u32 bw_boundary = get_bw_boundary(sch_ep->speed);
 805	u32 offset;
 806	u32 worst_bw;
 807	u32 min_bw = ~0;
 808	int min_index = -1;
 809
 810	/*
  811	 * Search through all possible schedule microframes,
  812	 * and find the microframe whose worst bandwidth is minimum.
 813	 */
 814	for (offset = 0; offset < sch_ep->esit; offset++) {
 815
 816		worst_bw = get_max_bw(sch_bw, sch_ep, offset);
 817		if (worst_bw > bw_boundary)
 818			continue;
 819
 
 820		if (min_bw > worst_bw) {
 821			min_bw = worst_bw;
 822			min_index = offset;
 823		}
 824	}
 825
 826	if (min_index < 0)
 827		return -ESCH_BW_OVERFLOW;
 828
 829	sch_ep->offset = min_index;
 830
 831	return 0;
 832}
 
 833
 834static int check_sch_bw(struct mu3h_sch_ep_info *sch_ep)
 835{
 836	int ret;
 837
 838	if (sch_ep->sch_tt)
 839		ret = alloc_sch_portion_of_frame(sch_ep);
 840	else
 841		ret = alloc_sch_microframes(sch_ep);
 842
 843	if (ret)
 844		return ret;
 845
 846	return load_ep_bw(sch_ep->bw_info, sch_ep, true);
 847}
 848
 849static void destroy_sch_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
 850			   struct mu3h_sch_ep_info *sch_ep)
 851{
  852	/* only release the ep bw that passed the check in check_sch_bw() */
 853	if (sch_ep->allocated)
 854		load_ep_bw(sch_ep->bw_info, sch_ep, false);
 855
 856	if (sch_ep->sch_tt)
 857		drop_tt(udev);
 858
 859	list_del(&sch_ep->endpoint);
 860	hlist_del(&sch_ep->hentry);
 861	kfree(sch_ep);
 862}
 863
 864static bool need_bw_sch(struct usb_device *udev,
 865			struct usb_host_endpoint *ep)
 866{
 867	bool has_tt = udev->tt && udev->tt->hub->parent;
 868
 869	/* only for periodic endpoints */
 870	if (usb_endpoint_xfer_control(&ep->desc)
 871		|| usb_endpoint_xfer_bulk(&ep->desc))
 872		return false;
 873
 874	/*
  875	 * LS & FS periodic endpoints whose device is not behind a TT are
  876	 * also ignored: the root-hub will schedule them directly, but the
  877	 * @bpkts field of the endpoint context needs to be set to 1.
 878	 */
 879	if (is_fs_or_ls(udev->speed) && !has_tt)
 880		return false;
 881
 882	/* skip endpoint with zero maxpkt */
 883	if (usb_endpoint_maxp(&ep->desc) == 0)
 884		return false;
 885
 886	return true;
 887}
 888
 889int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
 890{
 891	struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
 892	struct mu3h_sch_bw_info *sch_array;
 893	int num_usb_bus;
 
 894
 895	/* ss IN and OUT are separated */
 896	num_usb_bus = xhci->usb3_rhub.num_ports * 2 + xhci->usb2_rhub.num_ports;
 897
 898	sch_array = kcalloc(num_usb_bus, sizeof(*sch_array), GFP_KERNEL);
 899	if (sch_array == NULL)
 900		return -ENOMEM;
 901
 902	mtk->sch_array = sch_array;
 
 903
 904	INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
 905	hash_init(mtk->sch_ep_hash);
 906
 907	return 0;
 908}
 
 909
 910void xhci_mtk_sch_exit(struct xhci_hcd_mtk *mtk)
 911{
 912	kfree(mtk->sch_array);
 913}
 
 914
 915static int add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
 916			struct usb_host_endpoint *ep)
 917{
 918	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
 919	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 920	struct xhci_ep_ctx *ep_ctx;
 
 921	struct xhci_virt_device *virt_dev;
 
 922	struct mu3h_sch_ep_info *sch_ep;
 
 923	unsigned int ep_index;
 924
 
 925	virt_dev = xhci->devs[udev->slot_id];
 926	ep_index = xhci_get_endpoint_index(&ep->desc);
 
 927	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
 928
 929	if (!need_bw_sch(udev, ep)) {
 930		/*
  931		 * set @bpkts to 1 if it is an LS or FS periodic endpoint and its
  932		 * device is not connected through an external HS hub
 933		 */
 934		if (usb_endpoint_xfer_int(&ep->desc)
 935			|| usb_endpoint_xfer_isoc(&ep->desc))
 936			ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(1));
 937
 938		return 0;
 939	}
 940
 941	xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
 
 942
 943	sch_ep = create_sch_ep(mtk, udev, ep, ep_ctx);
 944	if (IS_ERR_OR_NULL(sch_ep))
 945		return -ENOMEM;
 946
 947	setup_sch_info(ep_ctx, sch_ep);
 948
 949	list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
 950	hash_add(mtk->sch_ep_hash, &sch_ep->hentry, (unsigned long)ep);
 951
 952	return 0;
 953}
 954
 955static void drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
 956			  struct usb_host_endpoint *ep)
 957{
 958	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
 959	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 960	struct mu3h_sch_ep_info *sch_ep;
 961	struct hlist_node *hn;
 962
 963	if (!need_bw_sch(udev, ep))
 964		return;
 965
 966	xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
 967
 968	hash_for_each_possible_safe(mtk->sch_ep_hash, sch_ep,
 969				    hn, hentry, (unsigned long)ep) {
 970		if (sch_ep->ep == ep) {
 971			destroy_sch_ep(mtk, udev, sch_ep);
 972			break;
 973		}
 974	}
 975}
 976
 977int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 978{
 979	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
 980	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 981	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
 982	struct mu3h_sch_ep_info *sch_ep;
 983	int ret;
 984
 985	xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
 986
 987	list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) {
 988		struct xhci_ep_ctx *ep_ctx;
 989		struct usb_host_endpoint *ep = sch_ep->ep;
 990		unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
 991
 992		ret = check_sch_bw(sch_ep);
 993		if (ret) {
 994			xhci_err(xhci, "Not enough bandwidth! (%s)\n",
 995				 sch_error_string(-ret));
 996			return -ENOSPC;
 997		}
 998
 999		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1000		ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(sch_ep->pkts)
1001			| EP_BCSCOUNT(sch_ep->cs_count)
1002			| EP_BBM(sch_ep->burst_mode));
1003		ep_ctx->reserved[1] = cpu_to_le32(EP_BOFFSET(sch_ep->offset)
1004			| EP_BREPEAT(sch_ep->repeat));
1005
1006		xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
1007			sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
1008			sch_ep->offset, sch_ep->repeat);
1009	}
1010
1011	ret = xhci_check_bandwidth(hcd, udev);
1012	if (!ret)
1013		list_del_init(&mtk->bw_ep_chk_list);
1014
1015	return ret;
1016}
 
1017
1018void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 
1019{
1020	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
1021	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1022	struct mu3h_sch_ep_info *sch_ep, *tmp;
1023
1024	xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
1025
1026	list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint)
1027		destroy_sch_ep(mtk, udev, sch_ep);
1028
1029	xhci_reset_bandwidth(hcd, udev);
1030}
1031
1032int xhci_mtk_add_ep(struct usb_hcd *hcd, struct usb_device *udev,
1033		    struct usb_host_endpoint *ep)
1034{
1035	int ret;
1036
1037	ret = xhci_add_endpoint(hcd, udev, ep);
1038	if (ret)
1039		return ret;
1040
1041	if (ep->hcpriv)
1042		ret = add_ep_quirk(hcd, udev, ep);
1043
1044	return ret;
1045}
1046
1047int xhci_mtk_drop_ep(struct usb_hcd *hcd, struct usb_device *udev,
1048		     struct usb_host_endpoint *ep)
1049{
1050	int ret;
1051
1052	ret = xhci_drop_endpoint(hcd, udev, ep);
1053	if (ret)
1054		return ret;
1055
1056	/* needn't check @ep->hcpriv, xhci_endpoint_disable set it NULL */
1057	drop_ep_quirk(hcd, udev, ep);
1058
1059	return 0;
1060}
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2015 MediaTek Inc.
  4 * Author:
  5 *  Zhigang.Wei <zhigang.wei@mediatek.com>
  6 *  Chunfeng.Yun <chunfeng.yun@mediatek.com>
  7 */
  8
  9#include <linux/kernel.h>
 10#include <linux/module.h>
 11#include <linux/slab.h>
 12
 13#include "xhci.h"
 14#include "xhci-mtk.h"
 15
 16#define SSP_BW_BOUNDARY	130000
 17#define SS_BW_BOUNDARY	51000
 18/* table 5-5. High-speed Isoc Transaction Limits in usb_20 spec */
 19#define HS_BW_BOUNDARY	6144
 20/* usb2 spec section11.18.1: at most 188 FS bytes per microframe */
 21#define FS_PAYLOAD_MAX 188
 22/*
 23 * max number of microframes for split transfer,
 24 * for fs isoc in : 1 ss + 1 idle + 7 cs
 25 */
 26#define TT_MICROFRAMES_MAX 9
 27
 28/* mtk scheduler bitmasks */
 29#define EP_BPKTS(p)	((p) & 0x7f)
 30#define EP_BCSCOUNT(p)	(((p) & 0x7) << 8)
 31#define EP_BBM(p)	((p) << 11)
 32#define EP_BOFFSET(p)	((p) & 0x3fff)
 33#define EP_BREPEAT(p)	(((p) & 0x7fff) << 16)
 34
 35static int is_fs_or_ls(enum usb_device_speed speed)
 36{
 37	return speed == USB_SPEED_FULL || speed == USB_SPEED_LOW;
 38}
 39
 40/*
 41* get the index of bandwidth domains array which @ep belongs to.
 42*
 43* the bandwidth domain array is saved to @sch_array of struct xhci_hcd_mtk,
 44* each HS root port is treated as a single bandwidth domain,
 45* but each SS root port is treated as two bandwidth domains, one for IN eps,
 46* one for OUT eps.
 48* @real_port value is defined as follows according to xHCI spec:
 49* 1 for SSport0, ..., N+1 for SSportN, N+2 for HSport0, N+3 for HSport1, etc
 50* so the bandwidth domain array is organized as follows for simplification:
 50* SSport0-OUT, SSport0-IN, ..., SSportX-OUT, SSportX-IN, HSport0, ..., HSportY
 51*/
 52static int get_bw_index(struct xhci_hcd *xhci, struct usb_device *udev,
 53	struct usb_host_endpoint *ep)
 
 54{
 
 55	struct xhci_virt_device *virt_dev;
 56	int bw_index;
 57
 58	virt_dev = xhci->devs[udev->slot_id];
 59
 60	if (udev->speed >= USB_SPEED_SUPER) {
 61		if (usb_endpoint_dir_out(&ep->desc))
 62			bw_index = (virt_dev->real_port - 1) * 2;
 63		else
 64			bw_index = (virt_dev->real_port - 1) * 2 + 1;
 65	} else {
 66		/* add one more for each SS port */
 67		bw_index = virt_dev->real_port + xhci->usb3_rhub.num_ports - 1;
 68	}
 69
 70	return bw_index;
 71}
 72
 73static u32 get_esit(struct xhci_ep_ctx *ep_ctx)
 74{
 75	u32 esit;
 76
 77	esit = 1 << CTX_TO_EP_INTERVAL(le32_to_cpu(ep_ctx->ep_info));
 78	if (esit > XHCI_MTK_MAX_ESIT)
 79		esit = XHCI_MTK_MAX_ESIT;
 80
 81	return esit;
 82}
 83
 84static struct mu3h_sch_tt *find_tt(struct usb_device *udev)
 85{
 86	struct usb_tt *utt = udev->tt;
 87	struct mu3h_sch_tt *tt, **tt_index, **ptt;
 88	unsigned int port;
 89	bool allocated_index = false;
 90
 91	if (!utt)
 92		return NULL;	/* Not below a TT */
 93
 94	/*
 95	 * Find/create our data structure.
 96	 * For hubs with a single TT, we get it directly.
 97	 * For hubs with multiple TTs, there's an extra level of pointers.
 98	 */
 99	tt_index = NULL;
100	if (utt->multi) {
101		tt_index = utt->hcpriv;
102		if (!tt_index) {	/* Create the index array */
103			tt_index = kcalloc(utt->hub->maxchild,
104					sizeof(*tt_index), GFP_KERNEL);
105			if (!tt_index)
106				return ERR_PTR(-ENOMEM);
107			utt->hcpriv = tt_index;
108			allocated_index = true;
109		}
110		port = udev->ttport - 1;
111		ptt = &tt_index[port];
112	} else {
113		port = 0;
114		ptt = (struct mu3h_sch_tt **) &utt->hcpriv;
115	}
116
117	tt = *ptt;
118	if (!tt) {	/* Create the mu3h_sch_tt */
119		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
120		if (!tt) {
121			if (allocated_index) {
122				utt->hcpriv = NULL;
123				kfree(tt_index);
124			}
125			return ERR_PTR(-ENOMEM);
126		}
127		INIT_LIST_HEAD(&tt->ep_list);
128		tt->usb_tt = utt;
129		tt->tt_port = port;
130		*ptt = tt;
131	}
132
133	return tt;
134}
135
136/* Release the TT above udev, if it's not in use */
137static void drop_tt(struct usb_device *udev)
138{
139	struct usb_tt *utt = udev->tt;
140	struct mu3h_sch_tt *tt, **tt_index, **ptt;
141	int i, cnt;
142
143	if (!utt || !utt->hcpriv)
144		return;		/* Not below a TT, or never allocated */
145
146	cnt = 0;
147	if (utt->multi) {
148		tt_index = utt->hcpriv;
149		ptt = &tt_index[udev->ttport - 1];
150		/*  How many entries are left in tt_index? */
151		for (i = 0; i < utt->hub->maxchild; ++i)
152			cnt += !!tt_index[i];
153	} else {
154		tt_index = NULL;
155		ptt = (struct mu3h_sch_tt **)&utt->hcpriv;
156	}
157
158	tt = *ptt;
159	if (!tt || !list_empty(&tt->ep_list))
160		return;		/* never allocated , or still in use*/
161
162	*ptt = NULL;
163	kfree(tt);
164
165	if (cnt == 1) {
166		utt->hcpriv = NULL;
167		kfree(tt_index);
168	}
169}
170
171static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
172	struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx)
 
173{
174	struct mu3h_sch_ep_info *sch_ep;
 
175	struct mu3h_sch_tt *tt = NULL;
176	u32 len_bw_budget_table;
177	size_t mem_size;
178
179	if (is_fs_or_ls(udev->speed))
180		len_bw_budget_table = TT_MICROFRAMES_MAX;
181	else if ((udev->speed >= USB_SPEED_SUPER)
182			&& usb_endpoint_xfer_isoc(&ep->desc))
183		len_bw_budget_table = get_esit(ep_ctx);
184	else
185		len_bw_budget_table = 1;
186
187	mem_size = sizeof(struct mu3h_sch_ep_info) +
188			len_bw_budget_table * sizeof(u32);
189	sch_ep = kzalloc(mem_size, GFP_KERNEL);
190	if (!sch_ep)
191		return ERR_PTR(-ENOMEM);
192
193	if (is_fs_or_ls(udev->speed)) {
194		tt = find_tt(udev);
195		if (IS_ERR(tt)) {
196			kfree(sch_ep);
197			return ERR_PTR(-ENOMEM);
198		}
199	}
200
 
201	sch_ep->sch_tt = tt;
202	sch_ep->ep = ep;
203
204	return sch_ep;
205}
206
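/*
 * Editor's note (illustrative only): the mem_size computation above sizes the
 * trailing bw_budget_table[] flexible array by hand; the v6.8 copy of
 * create_sch_ep() earlier on this page allocates the same thing with
 * kzalloc(struct_size(sch_ep, bw_budget_table, len), GFP_KERNEL), letting
 * struct_size() do the overflow-safe size arithmetic.
 */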
207static void setup_sch_info(struct usb_device *udev,
208		struct xhci_ep_ctx *ep_ctx, struct mu3h_sch_ep_info *sch_ep)
209{
210	u32 ep_type;
211	u32 maxpkt;
212	u32 max_burst;
213	u32 mult;
214	u32 esit_pkts;
215	u32 max_esit_payload;
216	u32 *bwb_table = sch_ep->bw_budget_table;
 
217	int i;
218
 
219	ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
220	maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
221	max_burst = CTX_TO_MAX_BURST(le32_to_cpu(ep_ctx->ep_info2));
222	mult = CTX_TO_EP_MULT(le32_to_cpu(ep_ctx->ep_info));
223	max_esit_payload =
224		(CTX_TO_MAX_ESIT_PAYLOAD_HI(
225			le32_to_cpu(ep_ctx->ep_info)) << 16) |
226		 CTX_TO_MAX_ESIT_PAYLOAD(le32_to_cpu(ep_ctx->tx_info));
227
228	sch_ep->esit = get_esit(ep_ctx);
 
229	sch_ep->ep_type = ep_type;
230	sch_ep->maxpkt = maxpkt;
231	sch_ep->offset = 0;
232	sch_ep->burst_mode = 0;
233	sch_ep->repeat = 0;
234
235	if (udev->speed == USB_SPEED_HIGH) {
236		sch_ep->cs_count = 0;
237
238		/*
239		 * usb_20 spec section 5.9
240		 * a single microframe is enough for HS synchronous endpoints
241		 * in an interval
242		 */
243		sch_ep->num_budget_microframes = 1;
244
245		/*
246		 * xHCI spec section6.2.3.4
247		 * @max_burst is the number of additional transaction
248		 * opportunities per microframe
249		 */
250		sch_ep->pkts = max_burst + 1;
251		sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
252		bwb_table[0] = sch_ep->bw_cost_per_microframe;
253	} else if (udev->speed >= USB_SPEED_SUPER) {
254		/* usb3_r1 spec section4.4.7 & 4.4.8 */
255		sch_ep->cs_count = 0;
256		sch_ep->burst_mode = 1;
257		/*
258		 * some device's (d)wBytesPerInterval is set as 0,
259		 * then max_esit_payload is 0, so evaluate esit_pkts from
260		 * mult and burst
261		 */
262		esit_pkts = DIV_ROUND_UP(max_esit_payload, maxpkt);
263		if (esit_pkts == 0)
264			esit_pkts = (mult + 1) * (max_burst + 1);
265
266		if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
267			sch_ep->pkts = esit_pkts;
268			sch_ep->num_budget_microframes = 1;
269			bwb_table[0] = maxpkt * sch_ep->pkts;
270		}
271
272		if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
273			u32 remainder;
274
275			if (sch_ep->esit == 1)
276				sch_ep->pkts = esit_pkts;
277			else if (esit_pkts <= sch_ep->esit)
278				sch_ep->pkts = 1;
279			else
280				sch_ep->pkts = roundup_pow_of_two(esit_pkts)
281					/ sch_ep->esit;
282
283			sch_ep->num_budget_microframes =
284				DIV_ROUND_UP(esit_pkts, sch_ep->pkts);
285
286			sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1);
287			sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
288
289			remainder = sch_ep->bw_cost_per_microframe;
290			remainder *= sch_ep->num_budget_microframes;
291			remainder -= (maxpkt * esit_pkts);
292			for (i = 0; i < sch_ep->num_budget_microframes - 1; i++)
293				bwb_table[i] = sch_ep->bw_cost_per_microframe;
294
295			/* last one <= bw_cost_per_microframe */
296			bwb_table[i] = remainder;
297		}
298	} else if (is_fs_or_ls(udev->speed)) {
299		sch_ep->pkts = 1; /* at most one packet for each microframe */
300
301		/*
302		 * num_budget_microframes and cs_count will be updated when the
303		 * TT is checked for INT_OUT_EP, ISOC/INT_IN_EP types
304		 */
305		sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
306		sch_ep->num_budget_microframes = sch_ep->cs_count;
307		sch_ep->bw_cost_per_microframe =
308			(maxpkt < FS_PAYLOAD_MAX) ? maxpkt : FS_PAYLOAD_MAX;
309
310		/* init budget table */
311		if (ep_type == ISOC_OUT_EP) {
312			for (i = 0; i < sch_ep->num_budget_microframes; i++)
313				bwb_table[i] =	sch_ep->bw_cost_per_microframe;
314		} else if (ep_type == INT_OUT_EP) {
315			/* only first one consumes bandwidth, others as zero */
316			bwb_table[0] = sch_ep->bw_cost_per_microframe;
317		} else { /* INT_IN_EP or ISOC_IN_EP */
318			bwb_table[0] = 0; /* start split */
319			bwb_table[1] = 0; /* idle */
320			/*
321			 * because cs_count will be updated according to the cs
322			 * position, assign all remaining budget array elements as
323			 * @bw_cost_per_microframe, but only the first
324			 * @num_budget_microframes elements will be used later
325			 */
326			for (i = 2; i < TT_MICROFRAMES_MAX; i++)
327				bwb_table[i] =	sch_ep->bw_cost_per_microframe;
328		}
329	}
330}
331
332/* Get maximum bandwidth when we schedule at offset slot. */
333static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
334	struct mu3h_sch_ep_info *sch_ep, u32 offset)
335{
336	u32 num_esit;
337	u32 max_bw = 0;
338	u32 bw;
339	int i;
340	int j;
341
342	num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
343	for (i = 0; i < num_esit; i++) {
344		u32 base = offset + i * sch_ep->esit;
345
346		for (j = 0; j < sch_ep->num_budget_microframes; j++) {
347			bw = sch_bw->bus_bw[base + j] +
348					sch_ep->bw_budget_table[j];
349			if (bw > max_bw)
350				max_bw = bw;
351		}
352	}
353	return max_bw;
354}
355
356static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
357	struct mu3h_sch_ep_info *sch_ep, bool used)
358{
359	u32 num_esit;
360	u32 base;
361	int i;
362	int j;
363
364	num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
365	for (i = 0; i < num_esit; i++) {
366		base = sch_ep->offset + i * sch_ep->esit;
367		for (j = 0; j < sch_ep->num_budget_microframes; j++) {
 
368			if (used)
369				sch_bw->bus_bw[base + j] +=
370					sch_ep->bw_budget_table[j];
371			else
372				sch_bw->bus_bw[base + j] -=
373					sch_ep->bw_budget_table[j];
374		}
375	}
376}
377
378static int check_sch_tt(struct usb_device *udev,
379	struct mu3h_sch_ep_info *sch_ep, u32 offset)
380{
381	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
382	u32 extra_cs_count;
383	u32 fs_budget_start;
384	u32 start_ss, last_ss;
385	u32 start_cs, last_cs;
386	int i;
387
388	start_ss = offset % 8;
389	fs_budget_start = (start_ss + 1) % 8;
390
391	if (sch_ep->ep_type == ISOC_OUT_EP) {
392		last_ss = start_ss + sch_ep->cs_count - 1;
393
394		/*
395		 * usb_20 spec section11.18:
396		 * must never schedule Start-Split in Y6
397		 */
398		if (!(start_ss == 7 || last_ss < 6))
399			return -ERANGE;
400
401		for (i = 0; i < sch_ep->cs_count; i++)
402			if (test_bit(offset + i, tt->split_bit_map))
403				return -ERANGE;
404
405	} else {
 
406		u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
407
408		/*
409		 * usb_20 spec section11.18:
410		 * must never schedule Start-Split in Y6
411		 */
412		if (start_ss == 6)
413			return -ERANGE;
414
415		/* one uframe for ss + one uframe for idle */
416		start_cs = (start_ss + 2) % 8;
417		last_cs = start_cs + cs_count - 1;
418
419		if (last_cs > 7)
420			return -ERANGE;
421
422		if (sch_ep->ep_type == ISOC_IN_EP)
423			extra_cs_count = (last_cs == 7) ? 1 : 2;
424		else /*  ep_type : INTR IN / INTR OUT */
425			extra_cs_count = (fs_budget_start == 6) ? 1 : 2;
426
427		cs_count += extra_cs_count;
 
428		if (cs_count > 7)
429			cs_count = 7; /* HW limit */
430
431		for (i = 0; i < cs_count + 2; i++) {
432			if (test_bit(offset + i, tt->split_bit_map))
433				return -ERANGE;
434		}
435
436		sch_ep->cs_count = cs_count;
437		/* one for ss, the other for idle */
438		sch_ep->num_budget_microframes = cs_count + 2;
439
440		/*
441		 * if interval = 1 and maxp > 752, num_budget_microframes is
442		 * larger than sch_ep->esit and will overstep the boundary
443		 */
444		if (sch_ep->num_budget_microframes > sch_ep->esit)
445			sch_ep->num_budget_microframes = sch_ep->esit;
446	}
447
448	return 0;
449}
450
451static void update_sch_tt(struct usb_device *udev,
452	struct mu3h_sch_ep_info *sch_ep)
453{
454	struct mu3h_sch_tt *tt = sch_ep->sch_tt;
455	u32 base, num_esit;
456	int i, j;
457
458	num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
459	for (i = 0; i < num_esit; i++) {
460		base = sch_ep->offset + i * sch_ep->esit;
461		for (j = 0; j < sch_ep->num_budget_microframes; j++)
462			set_bit(base + j, tt->split_bit_map);
463	}
464
465	list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
466}
467
468static int check_sch_bw(struct usb_device *udev,
469	struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
470{
471	u32 offset;
472	u32 esit;
473	u32 min_bw;
474	u32 min_index;
475	u32 worst_bw;
476	u32 bw_boundary;
477	u32 min_num_budget;
478	u32 min_cs_count;
479	bool tt_offset_ok = false;
480	int ret;
481
482	esit = sch_ep->esit;
483
484	/*
485	 * Search through all possible schedule microframes,
486	 * and find the microframe whose worst bandwidth is minimum.
487	 */
488	min_bw = ~0;
489	min_index = 0;
490	min_cs_count = sch_ep->cs_count;
491	min_num_budget = sch_ep->num_budget_microframes;
492	for (offset = 0; offset < esit; offset++) {
493		if (is_fs_or_ls(udev->speed)) {
494			ret = check_sch_tt(udev, sch_ep, offset);
495			if (ret)
496				continue;
497			else
498				tt_offset_ok = true;
499		}
500
501		if ((offset + sch_ep->num_budget_microframes) > sch_ep->esit)
502			break;
 
503
504		worst_bw = get_max_bw(sch_bw, sch_ep, offset);
505		if (min_bw > worst_bw) {
506			min_bw = worst_bw;
507			min_index = offset;
508			min_cs_count = sch_ep->cs_count;
509			min_num_budget = sch_ep->num_budget_microframes;
510		}
511		if (min_bw == 0)
512			break;
513	}
514
515	if (udev->speed == USB_SPEED_SUPER_PLUS)
516		bw_boundary = SSP_BW_BOUNDARY;
517	else if (udev->speed == USB_SPEED_SUPER)
518		bw_boundary = SS_BW_BOUNDARY;
519	else
520		bw_boundary = HS_BW_BOUNDARY;
521
522	/* check bandwidth */
523	if (min_bw > bw_boundary)
524		return -ERANGE;
525
526	sch_ep->offset = min_index;
527	sch_ep->cs_count = min_cs_count;
528	sch_ep->num_budget_microframes = min_num_budget;
529
530	if (is_fs_or_ls(udev->speed)) {
531		/* no offset is ok for the tt */
532		if (!tt_offset_ok)
533			return -ERANGE;
534
535		update_sch_tt(udev, sch_ep);
536	}
537
538	/* update bus bandwidth info */
539	update_bus_bw(sch_bw, sch_ep, 1);
540
541	return 0;
542}
543
544static bool need_bw_sch(struct usb_host_endpoint *ep,
545	enum usb_device_speed speed, int has_tt)
546{
547	/* only for periodic endpoints */
548	if (usb_endpoint_xfer_control(&ep->desc)
549		|| usb_endpoint_xfer_bulk(&ep->desc))
550		return false;
551
552	/*
553	 * LS & FS periodic endpoints whose device is not behind a TT are
554	 * also ignored: the root-hub will schedule them directly, but the
555	 * @bpkts field of the endpoint context needs to be set to 1.
556	 */
557	if (is_fs_or_ls(speed) && !has_tt)
558		return false;
559
560	return true;
561}
562
563int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
564{
565	struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
566	struct mu3h_sch_bw_info *sch_array;
567	int num_usb_bus;
568	int i;
569
570	/* ss IN and OUT are separated */
571	num_usb_bus = xhci->usb3_rhub.num_ports * 2 + xhci->usb2_rhub.num_ports;
572
573	sch_array = kcalloc(num_usb_bus, sizeof(*sch_array), GFP_KERNEL);
574	if (sch_array == NULL)
575		return -ENOMEM;
576
577	for (i = 0; i < num_usb_bus; i++)
578		INIT_LIST_HEAD(&sch_array[i].bw_ep_list);
579
580	mtk->sch_array = sch_array;
 
581
582	return 0;
583}
584EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
585
586void xhci_mtk_sch_exit(struct xhci_hcd_mtk *mtk)
587{
588	kfree(mtk->sch_array);
589}
590EXPORT_SYMBOL_GPL(xhci_mtk_sch_exit);
591
592int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
593		struct usb_host_endpoint *ep)
594{
595	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
596	struct xhci_hcd *xhci;
597	struct xhci_ep_ctx *ep_ctx;
598	struct xhci_slot_ctx *slot_ctx;
599	struct xhci_virt_device *virt_dev;
600	struct mu3h_sch_bw_info *sch_bw;
601	struct mu3h_sch_ep_info *sch_ep;
602	struct mu3h_sch_bw_info *sch_array;
603	unsigned int ep_index;
604	int bw_index;
605	int ret = 0;
606
607	xhci = hcd_to_xhci(hcd);
608	virt_dev = xhci->devs[udev->slot_id];
609	ep_index = xhci_get_endpoint_index(&ep->desc);
610	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
611	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
612	sch_array = mtk->sch_array;
613
614	xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
615		__func__, usb_endpoint_type(&ep->desc), udev->speed,
616		usb_endpoint_maxp(&ep->desc),
617		usb_endpoint_dir_in(&ep->desc), ep);
618
619	if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) {
620		/*
621		 * set @bpkts to 1 if it is an LS or FS periodic endpoint and its
622		 * device is not connected through an external HS hub
623		 */
624		if (usb_endpoint_xfer_int(&ep->desc)
625			|| usb_endpoint_xfer_isoc(&ep->desc))
626			ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
627
628		return 0;
629	}
630
631	bw_index = get_bw_index(xhci, udev, ep);
632	sch_bw = &sch_array[bw_index];
633
634	sch_ep = create_sch_ep(udev, ep, ep_ctx);
635	if (IS_ERR_OR_NULL(sch_ep))
636		return -ENOMEM;
637
638	setup_sch_info(udev, ep_ctx, sch_ep);
639
640	ret = check_sch_bw(udev, sch_bw, sch_ep);
641	if (ret) {
642		xhci_err(xhci, "Not enough bandwidth!\n");
643		if (is_fs_or_ls(udev->speed))
644			drop_tt(udev);
645
646		kfree(sch_ep);
647		return -ENOSPC;
648	}
 
649
650	list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
651
652	ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
653		| EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
654	ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
655		| EP_BREPEAT(sch_ep->repeat));
656
657	xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
658			sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
659			sch_ep->offset, sch_ep->repeat);
 
660
661	return 0;
662}
663EXPORT_SYMBOL_GPL(xhci_mtk_add_ep_quirk);
664
665void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
666		struct usb_host_endpoint *ep)
667{
668	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
669	struct xhci_hcd *xhci;
670	struct xhci_slot_ctx *slot_ctx;
671	struct xhci_virt_device *virt_dev;
672	struct mu3h_sch_bw_info *sch_array;
673	struct mu3h_sch_bw_info *sch_bw;
674	struct mu3h_sch_ep_info *sch_ep;
675	int bw_index;
676
677	xhci = hcd_to_xhci(hcd);
678	virt_dev = xhci->devs[udev->slot_id];
679	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
680	sch_array = mtk->sch_array;
681
682	xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
683		__func__, usb_endpoint_type(&ep->desc), udev->speed,
684		usb_endpoint_maxp(&ep->desc),
685		usb_endpoint_dir_in(&ep->desc), ep);
686
687	if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT))
688		return;
 
689
690	bw_index = get_bw_index(xhci, udev, ep);
691	sch_bw = &sch_array[bw_index];
692
693	list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
694		if (sch_ep->ep == ep) {
695			update_bus_bw(sch_bw, sch_ep, 0);
696			list_del(&sch_ep->endpoint);
697			if (is_fs_or_ls(udev->speed)) {
698				list_del(&sch_ep->tt_endpoint);
699				drop_tt(udev);
700			}
701			kfree(sch_ep);
702			break;
703		}
704	}
705}
706EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk);