   1// SPDX-License-Identifier: ISC
   2/*
   3 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
   4 */
   5
   6#include <linux/module.h>
   7#include "mt76.h"
   8#include "usb_trace.h"
   9#include "dma.h"
  10
  11#define MT_VEND_REQ_MAX_RETRY	10
  12#define MT_VEND_REQ_TOUT_MS	300
  13
  14static bool disable_usb_sg;
  15module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
  16MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
  17
  18int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
  19			   u16 val, u16 offset, void *buf, size_t len)
  20{
  21	struct usb_interface *uintf = to_usb_interface(dev->dev);
  22	struct usb_device *udev = interface_to_usbdev(uintf);
  23	unsigned int pipe;
  24	int i, ret;
  25
  26	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);
  27
  28	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
  29				       : usb_sndctrlpipe(udev, 0);
  30	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
  31		if (test_bit(MT76_REMOVED, &dev->phy.state))
  32			return -EIO;
  33
  34		ret = usb_control_msg(udev, pipe, req, req_type, val,
  35				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
  36		if (ret == -ENODEV)
  37			set_bit(MT76_REMOVED, &dev->phy.state);
  38		if (ret >= 0 || ret == -ENODEV)
  39			return ret;
  40		usleep_range(5000, 10000);
  41	}
  42
  43	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
  44		req, offset, ret);
  45	return ret;
  46}
  47EXPORT_SYMBOL_GPL(__mt76u_vendor_request);
  48
  49int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
  50			 u8 req_type, u16 val, u16 offset,
  51			 void *buf, size_t len)
  52{
  53	int ret;
  54
  55	mutex_lock(&dev->usb.usb_ctrl_mtx);
  56	ret = __mt76u_vendor_request(dev, req, req_type,
  57				     val, offset, buf, len);
  58	trace_usb_reg_wr(dev, offset, val);
  59	mutex_unlock(&dev->usb.usb_ctrl_mtx);
  60
  61	return ret;
  62}
  63EXPORT_SYMBOL_GPL(mt76u_vendor_request);
  64
  65u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
  66{
  67	struct mt76_usb *usb = &dev->usb;
  68	u32 data = ~0;
  69	int ret;
  70
  71	ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
  72				     addr, usb->data, sizeof(__le32));
  73	if (ret == sizeof(__le32))
  74		data = get_unaligned_le32(usb->data);
  75	trace_usb_reg_rr(dev, addr, data);
  76
  77	return data;
  78}
  79EXPORT_SYMBOL_GPL(___mt76u_rr);
  80
  81static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
  82{
  83	u8 req;
  84
  85	switch (addr & MT_VEND_TYPE_MASK) {
  86	case MT_VEND_TYPE_EEPROM:
  87		req = MT_VEND_READ_EEPROM;
  88		break;
  89	case MT_VEND_TYPE_CFG:
  90		req = MT_VEND_READ_CFG;
  91		break;
  92	default:
  93		req = MT_VEND_MULTI_READ;
  94		break;
  95	}
  96
  97	return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
  98			   addr & ~MT_VEND_TYPE_MASK);
  99}
 100
 101static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
 102{
 103	u32 ret;
 104
 105	mutex_lock(&dev->usb.usb_ctrl_mtx);
 106	ret = __mt76u_rr(dev, addr);
 107	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 108
 109	return ret;
 110}
 111
 112void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
 113		 u32 addr, u32 val)
 114{
 115	struct mt76_usb *usb = &dev->usb;
 116
 117	put_unaligned_le32(val, usb->data);
 118	__mt76u_vendor_request(dev, req, req_type, addr >> 16,
 119			       addr, usb->data, sizeof(__le32));
 120	trace_usb_reg_wr(dev, addr, val);
 121}
 122EXPORT_SYMBOL_GPL(___mt76u_wr);
 123
 124static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
 125{
 126	u8 req;
 127
 128	switch (addr & MT_VEND_TYPE_MASK) {
 129	case MT_VEND_TYPE_CFG:
 130		req = MT_VEND_WRITE_CFG;
 131		break;
 132	default:
 133		req = MT_VEND_MULTI_WRITE;
 134		break;
 135	}
 136	___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
 137		    addr & ~MT_VEND_TYPE_MASK, val);
 138}
 139
 140static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
 141{
 142	mutex_lock(&dev->usb.usb_ctrl_mtx);
 143	__mt76u_wr(dev, addr, val);
 144	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 145}
 146
 147static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
 148		     u32 mask, u32 val)
 149{
 150	mutex_lock(&dev->usb.usb_ctrl_mtx);
 151	val |= __mt76u_rr(dev, addr) & ~mask;
 152	__mt76u_wr(dev, addr, val);
 153	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 154
 155	return val;
 156}
 157
 158static void mt76u_copy(struct mt76_dev *dev, u32 offset,
 159		       const void *data, int len)
 160{
 161	struct mt76_usb *usb = &dev->usb;
 162	const u8 *val = data;
 163	int ret;
 164	int current_batch_size;
 165	int i = 0;
 166
 167	/* Ensure that a multiple of 4 bytes is always copied,
 168	 * otherwise beacons can be corrupted.
 169	 * See: "mt76: round up length on mt76_wr_copy"
 170	 * Commit 850e8f6fbd5d0003b0
 171	 */
 172	len = round_up(len, 4);
 173
 174	mutex_lock(&usb->usb_ctrl_mtx);
 175	while (i < len) {
 176		current_batch_size = min_t(int, usb->data_len, len - i);
 177		memcpy(usb->data, val + i, current_batch_size);
 178		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
 179					     USB_DIR_OUT | USB_TYPE_VENDOR,
 180					     0, offset + i, usb->data,
 181					     current_batch_size);
 182		if (ret < 0)
 183			break;
 184
 185		i += current_batch_size;
 186	}
 187	mutex_unlock(&usb->usb_ctrl_mtx);
 188}
 189
 190void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
 191		     void *data, int len)
 192{
 193	struct mt76_usb *usb = &dev->usb;
 194	int i = 0, batch_len, ret;
 195	u8 *val = data;
 196
 197	len = round_up(len, 4);
 198	mutex_lock(&usb->usb_ctrl_mtx);
 199	while (i < len) {
 200		batch_len = min_t(int, usb->data_len, len - i);
 201		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
 202					     USB_DIR_IN | USB_TYPE_VENDOR,
 203					     (offset + i) >> 16, offset + i,
 204					     usb->data, batch_len);
 205		if (ret < 0)
 206			break;
 207
 208		memcpy(val + i, usb->data, batch_len);
 209		i += batch_len;
 210	}
 211	mutex_unlock(&usb->usb_ctrl_mtx);
 212}
 213EXPORT_SYMBOL_GPL(mt76u_read_copy);
 214
 215void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
 216		     const u16 offset, const u32 val)
 217{
 218	mutex_lock(&dev->usb.usb_ctrl_mtx);
 219	__mt76u_vendor_request(dev, req,
 220			       USB_DIR_OUT | USB_TYPE_VENDOR,
 221			       val & 0xffff, offset, NULL, 0);
 222	__mt76u_vendor_request(dev, req,
 223			       USB_DIR_OUT | USB_TYPE_VENDOR,
 224			       val >> 16, offset + 2, NULL, 0);
 225	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 226}
 227EXPORT_SYMBOL_GPL(mt76u_single_wr);
 228
 229static int
 230mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
 231		const struct mt76_reg_pair *data, int len)
 232{
 233	struct mt76_usb *usb = &dev->usb;
 234
 235	mutex_lock(&usb->usb_ctrl_mtx);
 236	while (len > 0) {
 237		__mt76u_wr(dev, base + data->reg, data->value);
 238		len--;
 239		data++;
 240	}
 241	mutex_unlock(&usb->usb_ctrl_mtx);
 242
 243	return 0;
 244}
 245
 246static int
 247mt76u_wr_rp(struct mt76_dev *dev, u32 base,
 248	    const struct mt76_reg_pair *data, int n)
 249{
 250	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
 251		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
 252	else
 253		return mt76u_req_wr_rp(dev, base, data, n);
 254}
 255
 256static int
 257mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
 258		int len)
 259{
 260	struct mt76_usb *usb = &dev->usb;
 261
 262	mutex_lock(&usb->usb_ctrl_mtx);
 263	while (len > 0) {
 264		data->value = __mt76u_rr(dev, base + data->reg);
 265		len--;
 266		data++;
 267	}
 268	mutex_unlock(&usb->usb_ctrl_mtx);
 269
 270	return 0;
 271}
 272
 273static int
 274mt76u_rd_rp(struct mt76_dev *dev, u32 base,
 275	    struct mt76_reg_pair *data, int n)
 276{
 277	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
 278		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
 279	else
 280		return mt76u_req_rd_rp(dev, base, data, n);
 281}
 282
 283static bool mt76u_check_sg(struct mt76_dev *dev)
 284{
 285	struct usb_interface *uintf = to_usb_interface(dev->dev);
 286	struct usb_device *udev = interface_to_usbdev(uintf);
 287
 288	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
 289		udev->bus->no_sg_constraint);
 290}
 291
 292static int
 293mt76u_set_endpoints(struct usb_interface *intf,
 294		    struct mt76_usb *usb)
 295{
 296	struct usb_host_interface *intf_desc = intf->cur_altsetting;
 297	struct usb_endpoint_descriptor *ep_desc;
 298	int i, in_ep = 0, out_ep = 0;
 299
 300	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
 301		ep_desc = &intf_desc->endpoint[i].desc;
 302
 303		if (usb_endpoint_is_bulk_in(ep_desc) &&
 304		    in_ep < __MT_EP_IN_MAX) {
 305			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
 306			in_ep++;
 307		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
 308			   out_ep < __MT_EP_OUT_MAX) {
 309			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
 310			out_ep++;
 311		}
 312	}
 313
 314	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
 315		return -EINVAL;
 316	return 0;
 317}
 318
 319static int
 320mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
 321		 int nsgs)
 322{
 323	int i;
 324
 325	for (i = 0; i < nsgs; i++) {
 326		void *data;
 327		int offset;
 328
 329		data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
 330		if (!data)
 331			break;
 332
 333		sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
 334			    offset);
 335	}
 336
 337	if (i < nsgs) {
 338		int j;
 339
 340		for (j = nsgs; j < urb->num_sgs; j++)
 341			mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
 342		urb->num_sgs = i;
 343	}
 344
 345	urb->num_sgs = max_t(int, i, urb->num_sgs);
 346	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
 347	sg_init_marker(urb->sg, urb->num_sgs);
 348
 349	return i ? : -ENOMEM;
 350}
 351
 352static int
 353mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
 354		struct urb *urb, int nsgs)
 355{
 356	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
 357	int offset;
 358
 359	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
 360		return mt76u_fill_rx_sg(dev, q, urb, nsgs);
 361
 362	urb->transfer_buffer_length = q->buf_size;
 363	urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
 364
 365	return urb->transfer_buffer ? 0 : -ENOMEM;
 366}
 367
 368static int
 369mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
 370		int sg_max_size)
 371{
 372	unsigned int size = sizeof(struct urb);
 373
 374	if (dev->usb.sg_en)
 375		size += sg_max_size * sizeof(struct scatterlist);
 376
 377	e->urb = kzalloc(size, GFP_KERNEL);
 378	if (!e->urb)
 379		return -ENOMEM;
 380
 381	usb_init_urb(e->urb);
 382
 383	if (dev->usb.sg_en && sg_max_size > 0)
 384		e->urb->sg = (struct scatterlist *)(e->urb + 1);
 385
 386	return 0;
 387}
 388
 389static int
 390mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
 391		   struct mt76_queue_entry *e)
 392{
 393	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
 394	int err, sg_size;
 395
 396	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
 397	err = mt76u_urb_alloc(dev, e, sg_size);
 398	if (err)
 399		return err;
 400
 401	return mt76u_refill_rx(dev, q, e->urb, sg_size);
 402}
 403
 404static void mt76u_urb_free(struct urb *urb)
 405{
 406	int i;
 407
 408	for (i = 0; i < urb->num_sgs; i++)
 409		mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
 410
 411	if (urb->transfer_buffer)
 412		mt76_put_page_pool_buf(urb->transfer_buffer, false);
 413
 414	usb_free_urb(urb);
 415}
 416
 417static void
 418mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
 419		    struct urb *urb, usb_complete_t complete_fn,
 420		    void *context)
 421{
 422	struct usb_interface *uintf = to_usb_interface(dev->dev);
 423	struct usb_device *udev = interface_to_usbdev(uintf);
 424	unsigned int pipe;
 425
 426	if (dir == USB_DIR_IN)
 427		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
 428	else
 429		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
 430
 431	urb->dev = udev;
 432	urb->pipe = pipe;
 433	urb->complete = complete_fn;
 434	urb->context = context;
 435}
 436
 437static struct urb *
 438mt76u_get_next_rx_entry(struct mt76_queue *q)
 439{
 440	struct urb *urb = NULL;
 441	unsigned long flags;
 442
 443	spin_lock_irqsave(&q->lock, flags);
 444	if (q->queued > 0) {
 445		urb = q->entry[q->tail].urb;
 446		q->tail = (q->tail + 1) % q->ndesc;
 447		q->queued--;
 448	}
 449	spin_unlock_irqrestore(&q->lock, flags);
 450
 451	return urb;
 452}
 453
 454static int
 455mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
 456		       u32 data_len)
 457{
 458	u16 dma_len, min_len;
 459
 460	dma_len = get_unaligned_le16(data);
 461	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
 462		return dma_len;
 463
 464	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
 465	if (data_len < min_len || !dma_len ||
 466	    dma_len + MT_DMA_HDR_LEN > data_len ||
 467	    (dma_len & 0x3))
 468		return -EINVAL;
 469	return dma_len;
 470}
 471
 472static struct sk_buff *
 473mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
 474		   int len, int buf_size)
 475{
 476	int head_room, drv_flags = dev->drv->drv_flags;
 477	struct sk_buff *skb;
 478
 479	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
 480	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
 481		struct page *page;
 482
 483		/* slow path, not enough space for data and
 484		 * skb_shared_info
 485		 */
 486		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
 487		if (!skb)
 488			return NULL;
 489
 490		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
 491		data += head_room + MT_SKB_HEAD_LEN;
 492		page = virt_to_head_page(data);
 493		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 494				page, data - page_address(page),
 495				len - MT_SKB_HEAD_LEN, buf_size);
 496
 497		return skb;
 498	}
 499
 500	/* fast path */
 501	skb = build_skb(data, buf_size);
 502	if (!skb)
 503		return NULL;
 504
 505	skb_reserve(skb, head_room);
 506	__skb_put(skb, len);
 507
 508	return skb;
 509}
 510
 511static int
 512mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
 513		       int buf_size)
 514{
 515	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
 516	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
 517	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
 518	struct sk_buff *skb;
 519
 520	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
 521		return 0;
 522
 523	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
 524	if (len < 0)
 525		return 0;
 526
 527	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
 528	data_len = min_t(int, len, data_len - head_room);
 529
 530	if (len == data_len &&
 531	    dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len))
 532		return 0;
 533
 534	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
 535	if (!skb)
 536		return 0;
 537
 538	len -= data_len;
 539	while (len > 0 && nsgs < urb->num_sgs) {
 540		data_len = min_t(int, len, urb->sg[nsgs].length);
 541		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 542				sg_page(&urb->sg[nsgs]),
 543				urb->sg[nsgs].offset, data_len,
 544				buf_size);
 545		len -= data_len;
 546		nsgs++;
 547	}
 548
 549	skb_mark_for_recycle(skb);
 550	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
 551
 552	return nsgs;
 553}
 554
 555static void mt76u_complete_rx(struct urb *urb)
 556{
 557	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
 558	struct mt76_queue *q = urb->context;
 559	unsigned long flags;
 560
 561	trace_rx_urb(dev, urb);
 562
 563	switch (urb->status) {
 564	case -ECONNRESET:
 565	case -ESHUTDOWN:
 566	case -ENOENT:
 567	case -EPROTO:
 568		return;
 569	default:
 570		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
 571				    urb->status);
 572		fallthrough;
 573	case 0:
 574		break;
 575	}
 576
 577	spin_lock_irqsave(&q->lock, flags);
 578	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
 579		goto out;
 580
 581	q->head = (q->head + 1) % q->ndesc;
 582	q->queued++;
 583	mt76_worker_schedule(&dev->usb.rx_worker);
 584out:
 585	spin_unlock_irqrestore(&q->lock, flags);
 586}
 587
 588static int
 589mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
 590		    struct urb *urb)
 591{
 592	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;
 593
 594	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
 595			    mt76u_complete_rx, &dev->q_rx[qid]);
 596	trace_submit_urb(dev, urb);
 597
 598	return usb_submit_urb(urb, GFP_ATOMIC);
 599}
 600
 601static void
 602mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 603{
 604	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
 605	struct urb *urb;
 606	int err, count;
 607
 608	while (true) {
 609		urb = mt76u_get_next_rx_entry(q);
 610		if (!urb)
 611			break;
 612
 613		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
 614		if (count > 0) {
 615			err = mt76u_refill_rx(dev, q, urb, count);
 616			if (err < 0)
 617				break;
 618		}
 619		mt76u_submit_rx_buf(dev, qid, urb);
 620	}
 621	if (qid == MT_RXQ_MAIN) {
 622		local_bh_disable();
 623		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
 624		local_bh_enable();
 625	}
 626}
 627
 628static void mt76u_rx_worker(struct mt76_worker *w)
 629{
 630	struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker);
 631	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
 632	int i;
 633
 634	rcu_read_lock();
 635	mt76_for_each_q_rx(dev, i)
 636		mt76u_process_rx_queue(dev, &dev->q_rx[i]);
 637	rcu_read_unlock();
 638}
 639
 640static int
 641mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
 642{
 643	struct mt76_queue *q = &dev->q_rx[qid];
 644	unsigned long flags;
 645	int i, err = 0;
 646
 647	spin_lock_irqsave(&q->lock, flags);
 648	for (i = 0; i < q->ndesc; i++) {
 649		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
 650		if (err < 0)
 651			break;
 652	}
 653	q->head = q->tail = 0;
 654	q->queued = 0;
 655	spin_unlock_irqrestore(&q->lock, flags);
 656
 657	return err;
 658}
 659
 660static int
 661mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
 662{
 663	struct mt76_queue *q = &dev->q_rx[qid];
 664	int i, err;
 665
 666	err = mt76_create_page_pool(dev, q);
 667	if (err)
 668		return err;
 669
 670	spin_lock_init(&q->lock);
 671	q->entry = devm_kcalloc(dev->dev,
 672				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
 673				GFP_KERNEL);
 674	if (!q->entry)
 675		return -ENOMEM;
 676
 677	q->ndesc = MT_NUM_RX_ENTRIES;
 678	q->buf_size = PAGE_SIZE;
 679
 680	for (i = 0; i < q->ndesc; i++) {
 681		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
 682		if (err < 0)
 683			return err;
 684	}
 685
 686	return mt76u_submit_rx_buffers(dev, qid);
 687}
 688
 689int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
 690{
 691	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
 692}
 693EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 694
 695static void
 696mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 697{
 698	int i;
 699
 700	for (i = 0; i < q->ndesc; i++) {
 701		if (!q->entry[i].urb)
 702			continue;
 703
 704		mt76u_urb_free(q->entry[i].urb);
 705		q->entry[i].urb = NULL;
 706	}
 707	page_pool_destroy(q->page_pool);
 708	q->page_pool = NULL;
 709}
 710
 711static void mt76u_free_rx(struct mt76_dev *dev)
 712{
 713	int i;
 714
 715	mt76_worker_teardown(&dev->usb.rx_worker);
 716
 717	mt76_for_each_q_rx(dev, i)
 718		mt76u_free_rx_queue(dev, &dev->q_rx[i]);
 719}
 720
 721void mt76u_stop_rx(struct mt76_dev *dev)
 722{
 723	int i;
 724
 725	mt76_worker_disable(&dev->usb.rx_worker);
 726
 727	mt76_for_each_q_rx(dev, i) {
 728		struct mt76_queue *q = &dev->q_rx[i];
 729		int j;
 730
 731		for (j = 0; j < q->ndesc; j++)
 732			usb_poison_urb(q->entry[j].urb);
 733	}
 734}
 735EXPORT_SYMBOL_GPL(mt76u_stop_rx);
 736
 737int mt76u_resume_rx(struct mt76_dev *dev)
 738{
 739	int i;
 740
 741	mt76_for_each_q_rx(dev, i) {
 742		struct mt76_queue *q = &dev->q_rx[i];
 743		int err, j;
 744
 745		for (j = 0; j < q->ndesc; j++)
 746			usb_unpoison_urb(q->entry[j].urb);
 747
 748		err = mt76u_submit_rx_buffers(dev, i);
 749		if (err < 0)
 750			return err;
 751	}
 752
 753	mt76_worker_enable(&dev->usb.rx_worker);
 754
 755	return 0;
 756}
 757EXPORT_SYMBOL_GPL(mt76u_resume_rx);
 758
 759static void mt76u_status_worker(struct mt76_worker *w)
 760{
 761	struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker);
 762	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
 763	struct mt76_queue_entry entry;
 764	struct mt76_queue *q;
 765	int i;
 766
 767	if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
 768		return;
 769
 770	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 771		q = dev->phy.q_tx[i];
 772		if (!q)
 773			continue;
 774
 775		while (q->queued > 0) {
 776			if (!q->entry[q->tail].done)
 777				break;
 778
 779			entry = q->entry[q->tail];
 780			q->entry[q->tail].done = false;
 781
 782			mt76_queue_tx_complete(dev, q, &entry);
 783		}
 784
 785		if (!q->queued)
 786			wake_up(&dev->tx_wait);
 787
 788		mt76_worker_schedule(&dev->tx_worker);
 789	}
 790
 791	if (dev->drv->tx_status_data &&
 792	    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
 793		queue_work(dev->wq, &dev->usb.stat_work);
 794}
 795
 796static void mt76u_tx_status_data(struct work_struct *work)
 797{
 798	struct mt76_usb *usb;
 799	struct mt76_dev *dev;
 800	u8 update = 1;
 801	u16 count = 0;
 802
 803	usb = container_of(work, struct mt76_usb, stat_work);
 804	dev = container_of(usb, struct mt76_dev, usb);
 805
 806	while (true) {
 807		if (test_bit(MT76_REMOVED, &dev->phy.state))
 808			break;
 809
 810		if (!dev->drv->tx_status_data(dev, &update))
 811			break;
 812		count++;
 813	}
 814
 815	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
 816		queue_work(dev->wq, &usb->stat_work);
 817	else
 818		clear_bit(MT76_READING_STATS, &dev->phy.state);
 819}
 820
 821static void mt76u_complete_tx(struct urb *urb)
 822{
 823	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
 824	struct mt76_queue_entry *e = urb->context;
 825
 826	if (mt76u_urb_error(urb))
 827		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
 828	e->done = true;
 829
 830	mt76_worker_schedule(&dev->usb.status_worker);
 831}
 832
 833static int
 834mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
 835		       struct urb *urb)
 836{
 837	urb->transfer_buffer_length = skb->len;
 838
 839	if (!dev->usb.sg_en) {
 840		urb->transfer_buffer = skb->data;
 841		return 0;
 842	}
 843
 844	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
 845	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
 846	if (!urb->num_sgs)
 847		return -ENOMEM;
 848
 849	return urb->num_sgs;
 850}
 851
 852static int
 853mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 854		   enum mt76_txq_id qid, struct sk_buff *skb,
 855		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
 856{
 857	struct mt76_tx_info tx_info = {
 858		.skb = skb,
 859	};
 860	u16 idx = q->head;
 861	int err;
 862
 863	if (q->queued == q->ndesc)
 864		return -ENOSPC;
 865
 866	skb->prev = skb->next = NULL;
 867	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
 868	if (err < 0)
 869		return err;
 870
 871	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
 872	if (err < 0)
 873		return err;
 874
 875	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
 876			    q->entry[idx].urb, mt76u_complete_tx,
 877			    &q->entry[idx]);
 878
 879	q->head = (q->head + 1) % q->ndesc;
 880	q->entry[idx].skb = tx_info.skb;
 881	q->entry[idx].wcid = 0xffff;
 882	q->queued++;
 883
 884	return idx;
 885}
 886
 887static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
 888{
 889	struct urb *urb;
 890	int err;
 891
 892	while (q->first != q->head) {
 893		urb = q->entry[q->first].urb;
 894
 895		trace_submit_urb(dev, urb);
 896		err = usb_submit_urb(urb, GFP_ATOMIC);
 897		if (err < 0) {
 898			if (err == -ENODEV)
 899				set_bit(MT76_REMOVED, &dev->phy.state);
 900			else
 901				dev_err(dev->dev, "tx urb submit failed:%d\n",
 902					err);
 903			break;
 904		}
 905		q->first = (q->first + 1) % q->ndesc;
 906	}
 907}
 908
 909static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
 910{
 911	if (mt76_chip(dev) == 0x7663) {
 912		static const u8 lmac_queue_map[] = {
 913			/* ac to lmac mapping */
 914			[IEEE80211_AC_BK] = 0,
 915			[IEEE80211_AC_BE] = 1,
 916			[IEEE80211_AC_VI] = 2,
 917			[IEEE80211_AC_VO] = 4,
 918		};
 919
 920		if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
 921			return 1; /* BE */
 922
 923		return lmac_queue_map[ac];
 924	}
 925
 926	return mt76_ac_to_hwq(ac);
 927}
 928
 929static int mt76u_alloc_tx(struct mt76_dev *dev)
 930{
 931	struct mt76_queue *q;
 932	int i, j, err;
 933
 934	for (i = 0; i <= MT_TXQ_PSD; i++) {
 935		if (i >= IEEE80211_NUM_ACS) {
 936			dev->phy.q_tx[i] = dev->phy.q_tx[0];
 937			continue;
 938		}
 939
 940		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
 941		if (!q)
 942			return -ENOMEM;
 943
 944		spin_lock_init(&q->lock);
 945		q->hw_idx = mt76u_ac_to_hwq(dev, i);
 946
 947		dev->phy.q_tx[i] = q;
 948
 949		q->entry = devm_kcalloc(dev->dev,
 950					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
 951					GFP_KERNEL);
 952		if (!q->entry)
 953			return -ENOMEM;
 954
 955		q->ndesc = MT_NUM_TX_ENTRIES;
 956		for (j = 0; j < q->ndesc; j++) {
 957			err = mt76u_urb_alloc(dev, &q->entry[j],
 958					      MT_TX_SG_MAX_SIZE);
 959			if (err < 0)
 960				return err;
 961		}
 962	}
 963	return 0;
 964}
 965
 966static void mt76u_free_tx(struct mt76_dev *dev)
 967{
 968	int i;
 969
 970	mt76_worker_teardown(&dev->usb.status_worker);
 971
 972	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 973		struct mt76_queue *q;
 974		int j;
 975
 976		q = dev->phy.q_tx[i];
 977		if (!q)
 978			continue;
 979
 980		for (j = 0; j < q->ndesc; j++) {
 981			usb_free_urb(q->entry[j].urb);
 982			q->entry[j].urb = NULL;
 983		}
 984	}
 985}
 986
 987void mt76u_stop_tx(struct mt76_dev *dev)
 988{
 989	int ret;
 990
 991	mt76_worker_disable(&dev->usb.status_worker);
 992
 993	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
 994				 HZ / 5);
 995	if (!ret) {
 996		struct mt76_queue_entry entry;
 997		struct mt76_queue *q;
 998		int i, j;
 999
1000		dev_err(dev->dev, "timed out waiting for pending tx\n");
1001
1002		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
1003			q = dev->phy.q_tx[i];
1004			if (!q)
1005				continue;
1006
1007			for (j = 0; j < q->ndesc; j++)
1008				usb_kill_urb(q->entry[j].urb);
1009		}
1010
1011		mt76_worker_disable(&dev->tx_worker);
1012
1013		/* On device removal we might queue skbs, but mt76u_tx_kick()
1014		 * will fail to submit the urb; clean up those skbs manually.
1015		 */
1016		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
1017			q = dev->phy.q_tx[i];
1018			if (!q)
1019				continue;
1020
1021			while (q->queued > 0) {
1022				entry = q->entry[q->tail];
1023				q->entry[q->tail].done = false;
1024				mt76_queue_tx_complete(dev, q, &entry);
1025			}
1026		}
1027
1028		mt76_worker_enable(&dev->tx_worker);
1029	}
1030
1031	cancel_work_sync(&dev->usb.stat_work);
1032	clear_bit(MT76_READING_STATS, &dev->phy.state);
1033
1034	mt76_worker_enable(&dev->usb.status_worker);
1035
1036	mt76_tx_status_check(dev, true);
1037}
1038EXPORT_SYMBOL_GPL(mt76u_stop_tx);
1039
1040void mt76u_queues_deinit(struct mt76_dev *dev)
1041{
1042	mt76u_stop_rx(dev);
1043	mt76u_stop_tx(dev);
1044
1045	mt76u_free_rx(dev);
1046	mt76u_free_tx(dev);
1047}
1048EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
1049
1050int mt76u_alloc_queues(struct mt76_dev *dev)
1051{
1052	int err;
1053
1054	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
1055	if (err < 0)
1056		return err;
1057
1058	return mt76u_alloc_tx(dev);
1059}
1060EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
1061
1062static const struct mt76_queue_ops usb_queue_ops = {
1063	.tx_queue_skb = mt76u_tx_queue_skb,
1064	.kick = mt76u_tx_kick,
1065};
1066
1067int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
1068		 struct mt76_bus_ops *ops)
1069{
1070	struct usb_device *udev = interface_to_usbdev(intf);
1071	struct mt76_usb *usb = &dev->usb;
1072	int err;
1073
1074	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
1075
1076	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
1077	if (usb->data_len < 32)
1078		usb->data_len = 32;
1079
1080	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
1081	if (!usb->data)
1082		return -ENOMEM;
1083
1084	mutex_init(&usb->usb_ctrl_mtx);
1085	dev->bus = ops;
1086	dev->queue_ops = &usb_queue_ops;
1087
1088	dev_set_drvdata(&udev->dev, dev);
1089
1090	usb->sg_en = mt76u_check_sg(dev);
1091
1092	err = mt76u_set_endpoints(intf, usb);
1093	if (err < 0)
1094		return err;
1095
1096	err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker,
1097				"usb-rx");
1098	if (err)
1099		return err;
1100
1101	err = mt76_worker_setup(dev->hw, &usb->status_worker,
1102				mt76u_status_worker, "usb-status");
1103	if (err)
1104		return err;
1105
1106	sched_set_fifo_low(usb->rx_worker.task);
1107	sched_set_fifo_low(usb->status_worker.task);
1108
1109	return 0;
1110}
1111EXPORT_SYMBOL_GPL(__mt76u_init);
1112
1113int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
1114{
1115	static struct mt76_bus_ops bus_ops = {
1116		.rr = mt76u_rr,
1117		.wr = mt76u_wr,
1118		.rmw = mt76u_rmw,
1119		.read_copy = mt76u_read_copy,
1120		.write_copy = mt76u_copy,
1121		.wr_rp = mt76u_wr_rp,
1122		.rd_rp = mt76u_rd_rp,
1123		.type = MT76_BUS_USB,
1124	};
1125
1126	return __mt76u_init(dev, intf, &bus_ops);
1127}
1128EXPORT_SYMBOL_GPL(mt76u_init);
1129
1130MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
1131MODULE_DESCRIPTION("MediaTek MT76x USB helpers");
1132MODULE_LICENSE("Dual BSD/GPL");
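
For context, a sketch of how a chip driver would typically hook into the helpers exported above from its USB probe callback: allocate the mt76_dev, register the USB bus and queue ops with mt76u_init() (or __mt76u_init() with custom bus ops), then set up the rx/tx queues with mt76u_alloc_queues(). The my_chip_* identifiers, struct my_chip_dev and the error labels below are hypothetical placeholders, not part of this file.

/* Hypothetical usage sketch (not part of usb.c): wiring the exported
 * mt76u_* helpers into a chip driver's USB probe path. my_chip_ops,
 * my_chip_drv_ops and struct my_chip_dev are placeholders.
 */
static int my_chip_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct mt76_dev *mdev;
	int err;

	mdev = mt76_alloc_device(&intf->dev, sizeof(struct my_chip_dev),
				 &my_chip_ops, &my_chip_drv_ops);
	if (!mdev)
		return -ENOMEM;

	/* registers mt76u_rr/mt76u_wr/... as dev->bus and usb_queue_ops */
	err = mt76u_init(mdev, intf);
	if (err < 0)
		goto err_free;

	/* allocates MT_RXQ_MAIN and the per-AC tx queues backed by bulk URBs */
	err = mt76u_alloc_queues(mdev);
	if (err < 0)
		goto err_deinit;

	/* chip-specific init and mt76 registration would follow here */
	return 0;

err_deinit:
	mt76u_queues_deinit(mdev);
err_free:
	mt76_free_device(mdev);
	return err;
}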