v6.8
   1// SPDX-License-Identifier: ISC
   2/*
   3 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
   4 */
   5
   6#include <linux/module.h>
   7#include "mt76.h"
   8#include "usb_trace.h"
   9#include "dma.h"
  10
  11#define MT_VEND_REQ_MAX_RETRY	10
  12#define MT_VEND_REQ_TOUT_MS	300
  13
  14static bool disable_usb_sg;
  15module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
  16MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
  17
  18int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
  19			   u16 val, u16 offset, void *buf, size_t len)
  20{
  21	struct usb_interface *uintf = to_usb_interface(dev->dev);
  22	struct usb_device *udev = interface_to_usbdev(uintf);
  23	unsigned int pipe;
  24	int i, ret;
  25
  26	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);
  27
  28	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
  29				       : usb_sndctrlpipe(udev, 0);
  30	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
  31		if (test_bit(MT76_REMOVED, &dev->phy.state))
  32			return -EIO;
  33
  34		ret = usb_control_msg(udev, pipe, req, req_type, val,
  35				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
  36		if (ret == -ENODEV)
  37			set_bit(MT76_REMOVED, &dev->phy.state);
  38		if (ret >= 0 || ret == -ENODEV)
  39			return ret;
  40		usleep_range(5000, 10000);
  41	}
  42
  43	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
  44		req, offset, ret);
  45	return ret;
  46}
  47EXPORT_SYMBOL_GPL(__mt76u_vendor_request);
  48
  49int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
  50			 u8 req_type, u16 val, u16 offset,
  51			 void *buf, size_t len)
  52{
  53	int ret;
  54
  55	mutex_lock(&dev->usb.usb_ctrl_mtx);
  56	ret = __mt76u_vendor_request(dev, req, req_type,
  57				     val, offset, buf, len);
  58	trace_usb_reg_wr(dev, offset, val);
  59	mutex_unlock(&dev->usb.usb_ctrl_mtx);
  60
  61	return ret;
  62}
  63EXPORT_SYMBOL_GPL(mt76u_vendor_request);
  64
  65u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
  66{
  67	struct mt76_usb *usb = &dev->usb;
  68	u32 data = ~0;
  69	int ret;
  70
  71	ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
  72				     addr, usb->data, sizeof(__le32));
  73	if (ret == sizeof(__le32))
  74		data = get_unaligned_le32(usb->data);
  75	trace_usb_reg_rr(dev, addr, data);
  76
  77	return data;
  78}
  79EXPORT_SYMBOL_GPL(___mt76u_rr);
  80
  81static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
  82{
  83	u8 req;
  84
  85	switch (addr & MT_VEND_TYPE_MASK) {
  86	case MT_VEND_TYPE_EEPROM:
  87		req = MT_VEND_READ_EEPROM;
  88		break;
  89	case MT_VEND_TYPE_CFG:
  90		req = MT_VEND_READ_CFG;
  91		break;
  92	default:
  93		req = MT_VEND_MULTI_READ;
  94		break;
  95	}
  96
  97	return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
  98			   addr & ~MT_VEND_TYPE_MASK);
  99}
 100
 101static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
 102{
 103	u32 ret;
 104
 105	mutex_lock(&dev->usb.usb_ctrl_mtx);
 106	ret = __mt76u_rr(dev, addr);
 107	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 108
 109	return ret;
 110}
 111
 112void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
 113		 u32 addr, u32 val)
 114{
 115	struct mt76_usb *usb = &dev->usb;
 116
 117	put_unaligned_le32(val, usb->data);
 118	__mt76u_vendor_request(dev, req, req_type, addr >> 16,
 119			       addr, usb->data, sizeof(__le32));
 120	trace_usb_reg_wr(dev, addr, val);
 121}
 122EXPORT_SYMBOL_GPL(___mt76u_wr);
 123
 124static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
 125{
 126	u8 req;
 127
 128	switch (addr & MT_VEND_TYPE_MASK) {
 129	case MT_VEND_TYPE_CFG:
 130		req = MT_VEND_WRITE_CFG;
 131		break;
 132	default:
 133		req = MT_VEND_MULTI_WRITE;
 134		break;
 135	}
 136	___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
 137		    addr & ~MT_VEND_TYPE_MASK, val);
 138}
 139
 140static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
 141{
 142	mutex_lock(&dev->usb.usb_ctrl_mtx);
 143	__mt76u_wr(dev, addr, val);
 144	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 145}
 146
 147static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
 148		     u32 mask, u32 val)
 149{
 150	mutex_lock(&dev->usb.usb_ctrl_mtx);
 151	val |= __mt76u_rr(dev, addr) & ~mask;
 152	__mt76u_wr(dev, addr, val);
 153	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 154
 155	return val;
 156}
 157
 158static void mt76u_copy(struct mt76_dev *dev, u32 offset,
 159		       const void *data, int len)
 160{
 161	struct mt76_usb *usb = &dev->usb;
 162	const u8 *val = data;
 163	int ret;
 164	int current_batch_size;
 165	int i = 0;
 166
 167	/* Ensure that a multiple of 4 bytes is always copied,
 168	 * otherwise beacons can be corrupted.
 169	 * See: "mt76: round up length on mt76_wr_copy"
 170	 * Commit 850e8f6fbd5d0003b0
 171	 */
 172	len = round_up(len, 4);
 173
 174	mutex_lock(&usb->usb_ctrl_mtx);
 175	while (i < len) {
 176		current_batch_size = min_t(int, usb->data_len, len - i);
 177		memcpy(usb->data, val + i, current_batch_size);
 178		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
 179					     USB_DIR_OUT | USB_TYPE_VENDOR,
 180					     0, offset + i, usb->data,
 181					     current_batch_size);
 182		if (ret < 0)
 183			break;
 184
 185		i += current_batch_size;
 186	}
 187	mutex_unlock(&usb->usb_ctrl_mtx);
 188}
 189
 190void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
 191		     void *data, int len)
 192{
 193	struct mt76_usb *usb = &dev->usb;
 194	int i = 0, batch_len, ret;
 195	u8 *val = data;
 196
 197	len = round_up(len, 4);
 198	mutex_lock(&usb->usb_ctrl_mtx);
 199	while (i < len) {
 200		batch_len = min_t(int, usb->data_len, len - i);
 201		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
 202					     USB_DIR_IN | USB_TYPE_VENDOR,
 203					     (offset + i) >> 16, offset + i,
 204					     usb->data, batch_len);
 205		if (ret < 0)
 206			break;
 207
 208		memcpy(val + i, usb->data, batch_len);
 209		i += batch_len;
 210	}
 211	mutex_unlock(&usb->usb_ctrl_mtx);
 212}
 213EXPORT_SYMBOL_GPL(mt76u_read_copy);
 214
 215void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
 216		     const u16 offset, const u32 val)
 217{
 218	mutex_lock(&dev->usb.usb_ctrl_mtx);
 219	__mt76u_vendor_request(dev, req,
 220			       USB_DIR_OUT | USB_TYPE_VENDOR,
 221			       val & 0xffff, offset, NULL, 0);
 222	__mt76u_vendor_request(dev, req,
 223			       USB_DIR_OUT | USB_TYPE_VENDOR,
 224			       val >> 16, offset + 2, NULL, 0);
 225	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 226}
 227EXPORT_SYMBOL_GPL(mt76u_single_wr);
 228
 229static int
 230mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
 231		const struct mt76_reg_pair *data, int len)
 232{
 233	struct mt76_usb *usb = &dev->usb;
 234
 235	mutex_lock(&usb->usb_ctrl_mtx);
 236	while (len > 0) {
 237		__mt76u_wr(dev, base + data->reg, data->value);
 238		len--;
 239		data++;
 240	}
 241	mutex_unlock(&usb->usb_ctrl_mtx);
 242
 243	return 0;
 244}
 245
 246static int
 247mt76u_wr_rp(struct mt76_dev *dev, u32 base,
 248	    const struct mt76_reg_pair *data, int n)
 249{
 250	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
 251		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
 252	else
 253		return mt76u_req_wr_rp(dev, base, data, n);
 254}
 255
 256static int
 257mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
 258		int len)
 259{
 260	struct mt76_usb *usb = &dev->usb;
 261
 262	mutex_lock(&usb->usb_ctrl_mtx);
 263	while (len > 0) {
 264		data->value = __mt76u_rr(dev, base + data->reg);
 265		len--;
 266		data++;
 267	}
 268	mutex_unlock(&usb->usb_ctrl_mtx);
 269
 270	return 0;
 271}
 272
 273static int
 274mt76u_rd_rp(struct mt76_dev *dev, u32 base,
 275	    struct mt76_reg_pair *data, int n)
 276{
 277	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
 278		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
 279	else
 280		return mt76u_req_rd_rp(dev, base, data, n);
 281}
 282
 283static bool mt76u_check_sg(struct mt76_dev *dev)
 284{
 285	struct usb_interface *uintf = to_usb_interface(dev->dev);
 286	struct usb_device *udev = interface_to_usbdev(uintf);
 287
 288	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
 289		udev->bus->no_sg_constraint);
 290}
 291
 292static int
 293mt76u_set_endpoints(struct usb_interface *intf,
 294		    struct mt76_usb *usb)
 295{
 296	struct usb_host_interface *intf_desc = intf->cur_altsetting;
 297	struct usb_endpoint_descriptor *ep_desc;
 298	int i, in_ep = 0, out_ep = 0;
 299
 300	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
 301		ep_desc = &intf_desc->endpoint[i].desc;
 302
 303		if (usb_endpoint_is_bulk_in(ep_desc) &&
 304		    in_ep < __MT_EP_IN_MAX) {
 305			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
 306			in_ep++;
 307		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
 308			   out_ep < __MT_EP_OUT_MAX) {
 309			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
 310			out_ep++;
 311		}
 312	}
 313
 314	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
 315		return -EINVAL;
 316	return 0;
 317}
 318
 319static int
 320mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
 321		 int nsgs)
 322{
 323	int i;
 324
 325	for (i = 0; i < nsgs; i++) {
 326		void *data;
 327		int offset;
 328
 329		data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
 330		if (!data)
 331			break;
 332
 333		sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
 334			    offset);
 335	}
 336
 337	if (i < nsgs) {
 338		int j;
 339
 340		for (j = nsgs; j < urb->num_sgs; j++)
 341			mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
 342		urb->num_sgs = i;
 343	}
 344
 345	urb->num_sgs = max_t(int, i, urb->num_sgs);
 346	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
 347	sg_init_marker(urb->sg, urb->num_sgs);
 348
 349	return i ? : -ENOMEM;
 350}
 351
 352static int
 353mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
 354		struct urb *urb, int nsgs)
 355{
 356	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
 357	int offset;
 358
 359	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
 360		return mt76u_fill_rx_sg(dev, q, urb, nsgs);
 361
 362	urb->transfer_buffer_length = q->buf_size;
 363	urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
 364
 365	return urb->transfer_buffer ? 0 : -ENOMEM;
 366}
 367
 368static int
 369mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
 370		int sg_max_size)
 371{
 372	unsigned int size = sizeof(struct urb);
 373
 374	if (dev->usb.sg_en)
 375		size += sg_max_size * sizeof(struct scatterlist);
 376
 377	e->urb = kzalloc(size, GFP_KERNEL);
 378	if (!e->urb)
 379		return -ENOMEM;
 380
 381	usb_init_urb(e->urb);
 382
 383	if (dev->usb.sg_en && sg_max_size > 0)
 384		e->urb->sg = (struct scatterlist *)(e->urb + 1);
 385
 386	return 0;
 387}
 388
 389static int
 390mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
 391		   struct mt76_queue_entry *e)
 392{
 393	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
 394	int err, sg_size;
 395
 396	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
 397	err = mt76u_urb_alloc(dev, e, sg_size);
 398	if (err)
 399		return err;
 400
 401	return mt76u_refill_rx(dev, q, e->urb, sg_size);
 402}
 403
 404static void mt76u_urb_free(struct urb *urb)
 405{
 406	int i;
 407
 408	for (i = 0; i < urb->num_sgs; i++)
 409		mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
 410
 411	if (urb->transfer_buffer)
 412		mt76_put_page_pool_buf(urb->transfer_buffer, false);
 413
 414	usb_free_urb(urb);
 415}
 416
 417static void
 418mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
 419		    struct urb *urb, usb_complete_t complete_fn,
 420		    void *context)
 421{
 422	struct usb_interface *uintf = to_usb_interface(dev->dev);
 423	struct usb_device *udev = interface_to_usbdev(uintf);
 424	unsigned int pipe;
 425
 426	if (dir == USB_DIR_IN)
 427		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
 428	else
 429		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
 430
 431	urb->dev = udev;
 432	urb->pipe = pipe;
 433	urb->complete = complete_fn;
 434	urb->context = context;
 435}
 436
 437static struct urb *
 438mt76u_get_next_rx_entry(struct mt76_queue *q)
 439{
 440	struct urb *urb = NULL;
 441	unsigned long flags;
 442
 443	spin_lock_irqsave(&q->lock, flags);
 444	if (q->queued > 0) {
 445		urb = q->entry[q->tail].urb;
 446		q->tail = (q->tail + 1) % q->ndesc;
 447		q->queued--;
 448	}
 449	spin_unlock_irqrestore(&q->lock, flags);
 450
 451	return urb;
 452}
 453
 454static int
 455mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
 456		       u32 data_len)
 457{
 458	u16 dma_len, min_len;
 459
 460	dma_len = get_unaligned_le16(data);
 461	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
 462		return dma_len;
 463
 464	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
 465	if (data_len < min_len || !dma_len ||
 466	    dma_len + MT_DMA_HDR_LEN > data_len ||
 467	    (dma_len & 0x3))
 468		return -EINVAL;
 469	return dma_len;
 470}
 471
 472static struct sk_buff *
 473mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
 474		   int len, int buf_size)
 475{
 476	int head_room, drv_flags = dev->drv->drv_flags;
 477	struct sk_buff *skb;
 478
 479	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
 480	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
 481		struct page *page;
 482
 483		/* slow path, not enough space for data and
 484		 * skb_shared_info
 485		 */
 486		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
 487		if (!skb)
 488			return NULL;
 489
 490		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
 491		data += head_room + MT_SKB_HEAD_LEN;
 492		page = virt_to_head_page(data);
 493		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 494				page, data - page_address(page),
 495				len - MT_SKB_HEAD_LEN, buf_size);
 496
 497		return skb;
 498	}
 499
 500	/* fast path */
 501	skb = build_skb(data, buf_size);
 502	if (!skb)
 503		return NULL;
 504
 505	skb_reserve(skb, head_room);
 506	__skb_put(skb, len);
 507
 508	return skb;
 509}
 510
 511static int
 512mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
 513		       int buf_size)
 514{
 515	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
 516	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
 517	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
 518	struct sk_buff *skb;
 519
 520	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
 521		return 0;
 522
 523	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
 524	if (len < 0)
 525		return 0;
 526
 527	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
 528	data_len = min_t(int, len, data_len - head_room);
 529
 530	if (len == data_len &&
 531	    dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len))
 532		return 0;
 533
 534	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
 535	if (!skb)
 536		return 0;
 537
 538	len -= data_len;
 539	while (len > 0 && nsgs < urb->num_sgs) {
 540		data_len = min_t(int, len, urb->sg[nsgs].length);
 541		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 542				sg_page(&urb->sg[nsgs]),
 543				urb->sg[nsgs].offset, data_len,
 544				buf_size);
 545		len -= data_len;
 546		nsgs++;
 547	}
 548
 549	skb_mark_for_recycle(skb);
 550	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
 551
 552	return nsgs;
 553}
 554
 555static void mt76u_complete_rx(struct urb *urb)
 556{
 557	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
 558	struct mt76_queue *q = urb->context;
 559	unsigned long flags;
 560
 561	trace_rx_urb(dev, urb);
 562
 563	switch (urb->status) {
 564	case -ECONNRESET:
 565	case -ESHUTDOWN:
 566	case -ENOENT:
 567	case -EPROTO:
 568		return;
 569	default:
 570		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
 571				    urb->status);
 572		fallthrough;
 573	case 0:
 574		break;
 575	}
 576
 577	spin_lock_irqsave(&q->lock, flags);
 578	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
 579		goto out;
 580
 581	q->head = (q->head + 1) % q->ndesc;
 582	q->queued++;
 583	mt76_worker_schedule(&dev->usb.rx_worker);
 584out:
 585	spin_unlock_irqrestore(&q->lock, flags);
 586}
 587
 588static int
 589mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
 590		    struct urb *urb)
 591{
 592	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;
 593
 594	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
 595			    mt76u_complete_rx, &dev->q_rx[qid]);
 596	trace_submit_urb(dev, urb);
 597
 598	return usb_submit_urb(urb, GFP_ATOMIC);
 599}
 600
 601static void
 602mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 603{
 604	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
 605	struct urb *urb;
 606	int err, count;
 607
 608	while (true) {
 609		urb = mt76u_get_next_rx_entry(q);
 610		if (!urb)
 611			break;
 612
 613		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
 614		if (count > 0) {
 615			err = mt76u_refill_rx(dev, q, urb, count);
 616			if (err < 0)
 617				break;
 618		}
 619		mt76u_submit_rx_buf(dev, qid, urb);
 620	}
 621	if (qid == MT_RXQ_MAIN) {
 622		local_bh_disable();
 623		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
 624		local_bh_enable();
 625	}
 626}
 627
 628static void mt76u_rx_worker(struct mt76_worker *w)
 629{
 630	struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker);
 631	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
 632	int i;
 633
 634	rcu_read_lock();
 635	mt76_for_each_q_rx(dev, i)
 636		mt76u_process_rx_queue(dev, &dev->q_rx[i]);
 637	rcu_read_unlock();
 638}
 639
 640static int
 641mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
 642{
 643	struct mt76_queue *q = &dev->q_rx[qid];
 644	unsigned long flags;
 645	int i, err = 0;
 646
 647	spin_lock_irqsave(&q->lock, flags);
 648	for (i = 0; i < q->ndesc; i++) {
 649		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
 650		if (err < 0)
 651			break;
 652	}
 653	q->head = q->tail = 0;
 654	q->queued = 0;
 655	spin_unlock_irqrestore(&q->lock, flags);
 656
 657	return err;
 658}
 659
 660static int
 661mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
 662{
 663	struct mt76_queue *q = &dev->q_rx[qid];
 664	int i, err;
 665
 666	err = mt76_create_page_pool(dev, q);
 667	if (err)
 668		return err;
 669
 670	spin_lock_init(&q->lock);
 671	q->entry = devm_kcalloc(dev->dev,
 672				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
 673				GFP_KERNEL);
 674	if (!q->entry)
 675		return -ENOMEM;
 676
 677	q->ndesc = MT_NUM_RX_ENTRIES;
 678	q->buf_size = PAGE_SIZE;
 679
 680	for (i = 0; i < q->ndesc; i++) {
 681		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
 682		if (err < 0)
 683			return err;
 684	}
 685
 686	return mt76u_submit_rx_buffers(dev, qid);
 687}
 688
 689int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
 690{
 691	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
 692}
 693EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 694
 695static void
 696mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 697{
 698	int i;
 699
 700	for (i = 0; i < q->ndesc; i++) {
 701		if (!q->entry[i].urb)
 702			continue;
 703
 704		mt76u_urb_free(q->entry[i].urb);
 705		q->entry[i].urb = NULL;
 706	}
 707	page_pool_destroy(q->page_pool);
 708	q->page_pool = NULL;
 709}
 710
 711static void mt76u_free_rx(struct mt76_dev *dev)
 712{
 713	int i;
 714
 715	mt76_worker_teardown(&dev->usb.rx_worker);
 716
 717	mt76_for_each_q_rx(dev, i)
 718		mt76u_free_rx_queue(dev, &dev->q_rx[i]);
 719}
 720
 721void mt76u_stop_rx(struct mt76_dev *dev)
 722{
 723	int i;
 724
 725	mt76_worker_disable(&dev->usb.rx_worker);
 726
 727	mt76_for_each_q_rx(dev, i) {
 728		struct mt76_queue *q = &dev->q_rx[i];
 729		int j;
 730
 731		for (j = 0; j < q->ndesc; j++)
 732			usb_poison_urb(q->entry[j].urb);
 733	}
 734}
 735EXPORT_SYMBOL_GPL(mt76u_stop_rx);
 736
 737int mt76u_resume_rx(struct mt76_dev *dev)
 738{
 739	int i;
 740
 741	mt76_for_each_q_rx(dev, i) {
 742		struct mt76_queue *q = &dev->q_rx[i];
 743		int err, j;
 744
 745		for (j = 0; j < q->ndesc; j++)
 746			usb_unpoison_urb(q->entry[j].urb);
 747
 748		err = mt76u_submit_rx_buffers(dev, i);
 749		if (err < 0)
 750			return err;
 751	}
 752
 753	mt76_worker_enable(&dev->usb.rx_worker);
 754
 755	return 0;
 756}
 757EXPORT_SYMBOL_GPL(mt76u_resume_rx);
 758
 759static void mt76u_status_worker(struct mt76_worker *w)
 760{
 761	struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker);
 762	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
 763	struct mt76_queue_entry entry;
 764	struct mt76_queue *q;
 765	int i;
 766
 767	if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
 768		return;
 769
 770	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 771		q = dev->phy.q_tx[i];
 772		if (!q)
 773			continue;
 774
 775		while (q->queued > 0) {
 776			if (!q->entry[q->tail].done)
 777				break;
 778
 779			entry = q->entry[q->tail];
 780			q->entry[q->tail].done = false;
 781
 782			mt76_queue_tx_complete(dev, q, &entry);
 783		}
 784
 785		if (!q->queued)
 786			wake_up(&dev->tx_wait);
 787
 788		mt76_worker_schedule(&dev->tx_worker);
 789	}
 790
 791	if (dev->drv->tx_status_data &&
 792	    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
 793		queue_work(dev->wq, &dev->usb.stat_work);
 794}
 795
 796static void mt76u_tx_status_data(struct work_struct *work)
 797{
 798	struct mt76_usb *usb;
 799	struct mt76_dev *dev;
 800	u8 update = 1;
 801	u16 count = 0;
 802
 803	usb = container_of(work, struct mt76_usb, stat_work);
 804	dev = container_of(usb, struct mt76_dev, usb);
 805
 806	while (true) {
 807		if (test_bit(MT76_REMOVED, &dev->phy.state))
 808			break;
 809
 810		if (!dev->drv->tx_status_data(dev, &update))
 811			break;
 812		count++;
 813	}
 814
 815	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
 816		queue_work(dev->wq, &usb->stat_work);
 817	else
 818		clear_bit(MT76_READING_STATS, &dev->phy.state);
 819}
 820
 821static void mt76u_complete_tx(struct urb *urb)
 822{
 823	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
 824	struct mt76_queue_entry *e = urb->context;
 825
 826	if (mt76u_urb_error(urb))
 827		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
 828	e->done = true;
 829
 830	mt76_worker_schedule(&dev->usb.status_worker);
 831}
 832
 833static int
 834mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
 835		       struct urb *urb)
 836{
 837	urb->transfer_buffer_length = skb->len;
 838
 839	if (!dev->usb.sg_en) {
 840		urb->transfer_buffer = skb->data;
 841		return 0;
 842	}
 843
 844	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
 845	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
 846	if (!urb->num_sgs)
 847		return -ENOMEM;
 848
 849	return urb->num_sgs;
 850}
 851
 852static int
 853mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 854		   enum mt76_txq_id qid, struct sk_buff *skb,
 855		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
 856{
 857	struct mt76_tx_info tx_info = {
 858		.skb = skb,
 859	};
 860	u16 idx = q->head;
 861	int err;
 862
 863	if (q->queued == q->ndesc)
 864		return -ENOSPC;
 865
 866	skb->prev = skb->next = NULL;
 867	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
 868	if (err < 0)
 869		return err;
 870
 871	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
 872	if (err < 0)
 873		return err;
 874
 875	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
 876			    q->entry[idx].urb, mt76u_complete_tx,
 877			    &q->entry[idx]);
 878
 879	q->head = (q->head + 1) % q->ndesc;
 880	q->entry[idx].skb = tx_info.skb;
 881	q->entry[idx].wcid = 0xffff;
 882	q->queued++;
 883
 884	return idx;
 885}
 886
 887static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
 888{
 889	struct urb *urb;
 890	int err;
 891
 892	while (q->first != q->head) {
 893		urb = q->entry[q->first].urb;
 894
 895		trace_submit_urb(dev, urb);
 896		err = usb_submit_urb(urb, GFP_ATOMIC);
 897		if (err < 0) {
 898			if (err == -ENODEV)
 899				set_bit(MT76_REMOVED, &dev->phy.state);
 900			else
 901				dev_err(dev->dev, "tx urb submit failed:%d\n",
 902					err);
 903			break;
 904		}
 905		q->first = (q->first + 1) % q->ndesc;
 906	}
 907}
 908
 909static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
 910{
 911	if (mt76_chip(dev) == 0x7663) {
 912		static const u8 lmac_queue_map[] = {
 913			/* ac to lmac mapping */
 914			[IEEE80211_AC_BK] = 0,
 915			[IEEE80211_AC_BE] = 1,
 916			[IEEE80211_AC_VI] = 2,
 917			[IEEE80211_AC_VO] = 4,
 918		};
 919
 920		if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
 921			return 1; /* BE */
 922
 923		return lmac_queue_map[ac];
 924	}
 925
 926	return mt76_ac_to_hwq(ac);
 927}
 928
 929static int mt76u_alloc_tx(struct mt76_dev *dev)
 930{
 931	struct mt76_queue *q;
 932	int i, j, err;
 933
 934	for (i = 0; i <= MT_TXQ_PSD; i++) {
 935		if (i >= IEEE80211_NUM_ACS) {
 936			dev->phy.q_tx[i] = dev->phy.q_tx[0];
 937			continue;
 938		}
 939
 940		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
 941		if (!q)
 942			return -ENOMEM;
 943
 944		spin_lock_init(&q->lock);
 945		q->hw_idx = mt76u_ac_to_hwq(dev, i);
 946
 947		dev->phy.q_tx[i] = q;
 948
 949		q->entry = devm_kcalloc(dev->dev,
 950					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
 951					GFP_KERNEL);
 952		if (!q->entry)
 953			return -ENOMEM;
 954
 955		q->ndesc = MT_NUM_TX_ENTRIES;
 956		for (j = 0; j < q->ndesc; j++) {
 957			err = mt76u_urb_alloc(dev, &q->entry[j],
 958					      MT_TX_SG_MAX_SIZE);
 959			if (err < 0)
 960				return err;
 961		}
 962	}
 963	return 0;
 964}
 965
 966static void mt76u_free_tx(struct mt76_dev *dev)
 967{
 968	int i;
 969
 970	mt76_worker_teardown(&dev->usb.status_worker);
 971
 972	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 973		struct mt76_queue *q;
 974		int j;
 975
 976		q = dev->phy.q_tx[i];
 977		if (!q)
 978			continue;
 979
 980		for (j = 0; j < q->ndesc; j++) {
 981			usb_free_urb(q->entry[j].urb);
 982			q->entry[j].urb = NULL;
 983		}
 984	}
 985}
 986
 987void mt76u_stop_tx(struct mt76_dev *dev)
 988{
 989	int ret;
 990
 991	mt76_worker_disable(&dev->usb.status_worker);
 992
 993	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
 994				 HZ / 5);
 995	if (!ret) {
 996		struct mt76_queue_entry entry;
 997		struct mt76_queue *q;
 998		int i, j;
 999
1000		dev_err(dev->dev, "timed out waiting for pending tx\n");
1001
1002		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
1003			q = dev->phy.q_tx[i];
1004			if (!q)
1005				continue;
1006
1007			for (j = 0; j < q->ndesc; j++)
1008				usb_kill_urb(q->entry[j].urb);
1009		}
1010
1011		mt76_worker_disable(&dev->tx_worker);
1012
1013		/* On device removal we might queue skbs, but mt76u_tx_kick()
1014		 * will fail to submit the urb; clean up those skbs manually.
1015		 */
1016		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
1017			q = dev->phy.q_tx[i];
1018			if (!q)
1019				continue;
1020
1021			while (q->queued > 0) {
1022				entry = q->entry[q->tail];
1023				q->entry[q->tail].done = false;
1024				mt76_queue_tx_complete(dev, q, &entry);
1025			}
1026		}
1027
1028		mt76_worker_enable(&dev->tx_worker);
1029	}
1030
1031	cancel_work_sync(&dev->usb.stat_work);
1032	clear_bit(MT76_READING_STATS, &dev->phy.state);
1033
1034	mt76_worker_enable(&dev->usb.status_worker);
1035
1036	mt76_tx_status_check(dev, true);
1037}
1038EXPORT_SYMBOL_GPL(mt76u_stop_tx);
1039
1040void mt76u_queues_deinit(struct mt76_dev *dev)
1041{
1042	mt76u_stop_rx(dev);
1043	mt76u_stop_tx(dev);
1044
1045	mt76u_free_rx(dev);
1046	mt76u_free_tx(dev);
1047}
1048EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
1049
1050int mt76u_alloc_queues(struct mt76_dev *dev)
1051{
1052	int err;
1053
1054	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
1055	if (err < 0)
1056		return err;
1057
1058	return mt76u_alloc_tx(dev);
1059}
1060EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
1061
1062static const struct mt76_queue_ops usb_queue_ops = {
1063	.tx_queue_skb = mt76u_tx_queue_skb,
1064	.kick = mt76u_tx_kick,
1065};
1066
1067int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
1068		 struct mt76_bus_ops *ops)
1069{
1070	struct usb_device *udev = interface_to_usbdev(intf);
1071	struct mt76_usb *usb = &dev->usb;
1072	int err;
1073
1074	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
1075
1076	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
1077	if (usb->data_len < 32)
1078		usb->data_len = 32;
1079
1080	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
1081	if (!usb->data)
1082		return -ENOMEM;
1083
1084	mutex_init(&usb->usb_ctrl_mtx);
1085	dev->bus = ops;
1086	dev->queue_ops = &usb_queue_ops;
1087
1088	dev_set_drvdata(&udev->dev, dev);
1089
1090	usb->sg_en = mt76u_check_sg(dev);
1091
1092	err = mt76u_set_endpoints(intf, usb);
1093	if (err < 0)
1094		return err;
1095
1096	err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker,
1097				"usb-rx");
1098	if (err)
1099		return err;
1100
1101	err = mt76_worker_setup(dev->hw, &usb->status_worker,
1102				mt76u_status_worker, "usb-status");
1103	if (err)
1104		return err;
1105
1106	sched_set_fifo_low(usb->rx_worker.task);
1107	sched_set_fifo_low(usb->status_worker.task);
1108
1109	return 0;
1110}
1111EXPORT_SYMBOL_GPL(__mt76u_init);
1112
1113int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
1114{
1115	static struct mt76_bus_ops bus_ops = {
1116		.rr = mt76u_rr,
1117		.wr = mt76u_wr,
1118		.rmw = mt76u_rmw,
1119		.read_copy = mt76u_read_copy,
1120		.write_copy = mt76u_copy,
1121		.wr_rp = mt76u_wr_rp,
1122		.rd_rp = mt76u_rd_rp,
1123		.type = MT76_BUS_USB,
1124	};
1125
1126	return __mt76u_init(dev, intf, &bus_ops);
1127}
1128EXPORT_SYMBOL_GPL(mt76u_init);
1129
1130MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
1131MODULE_DESCRIPTION("MediaTek MT76x USB helpers");
1132MODULE_LICENSE("Dual BSD/GPL");
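
Usage sketch (not part of the listing above): chip-specific mt76 USB drivers are expected to drive the exported helpers from their probe path. The fragment below is a simplified assumption of that flow against the v6.8 signatures; the wrapper name and the error handling are illustrative and are not taken from any in-tree driver.

static int example_usb_probe_setup(struct mt76_dev *mdev,
				   struct usb_interface *intf)
{
	int err;

	/* Install the USB register-access ops (mt76u_rr/mt76u_wr/...),
	 * allocate the shared control-transfer buffer and spawn the
	 * rx/status workers.
	 */
	err = mt76u_init(mdev, intf);
	if (err < 0)
		return err;

	/* Allocate the tx/rx URB queues and submit the initial rx URBs. */
	err = mt76u_alloc_queues(mdev);
	if (err < 0) {
		mt76u_queues_deinit(mdev);
		return err;
	}

	return 0;
}
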
v5.14.15
   1// SPDX-License-Identifier: ISC
   2/*
   3 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
   4 */
   5
   6#include <linux/module.h>
   7#include "mt76.h"
   8#include "usb_trace.h"
   9#include "dma.h"
  10
  11#define MT_VEND_REQ_MAX_RETRY	10
  12#define MT_VEND_REQ_TOUT_MS	300
  13
  14static bool disable_usb_sg;
  15module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
  16MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
  17
  18static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
  19				  u8 req_type, u16 val, u16 offset,
  20				  void *buf, size_t len)
  21{
  22	struct usb_interface *uintf = to_usb_interface(dev->dev);
  23	struct usb_device *udev = interface_to_usbdev(uintf);
  24	unsigned int pipe;
  25	int i, ret;
  26
  27	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);
  28
  29	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
  30				       : usb_sndctrlpipe(udev, 0);
  31	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
  32		if (test_bit(MT76_REMOVED, &dev->phy.state))
  33			return -EIO;
  34
  35		ret = usb_control_msg(udev, pipe, req, req_type, val,
  36				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
  37		if (ret == -ENODEV)
  38			set_bit(MT76_REMOVED, &dev->phy.state);
  39		if (ret >= 0 || ret == -ENODEV)
  40			return ret;
  41		usleep_range(5000, 10000);
  42	}
  43
  44	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
  45		req, offset, ret);
  46	return ret;
  47}
  48
  49int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
  50			 u8 req_type, u16 val, u16 offset,
  51			 void *buf, size_t len)
  52{
  53	int ret;
  54
  55	mutex_lock(&dev->usb.usb_ctrl_mtx);
  56	ret = __mt76u_vendor_request(dev, req, req_type,
  57				     val, offset, buf, len);
  58	trace_usb_reg_wr(dev, offset, val);
  59	mutex_unlock(&dev->usb.usb_ctrl_mtx);
  60
  61	return ret;
  62}
  63EXPORT_SYMBOL_GPL(mt76u_vendor_request);
  64
  65static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
  66{
  67	struct mt76_usb *usb = &dev->usb;
  68	u32 data = ~0;
  69	int ret;
  70
  71	ret = __mt76u_vendor_request(dev, req,
  72				     USB_DIR_IN | USB_TYPE_VENDOR,
  73				     addr >> 16, addr, usb->data,
  74				     sizeof(__le32));
  75	if (ret == sizeof(__le32))
  76		data = get_unaligned_le32(usb->data);
  77	trace_usb_reg_rr(dev, addr, data);
  78
  79	return data;
  80}
  81
  82static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
  83{
  84	u8 req;
  85
  86	switch (addr & MT_VEND_TYPE_MASK) {
  87	case MT_VEND_TYPE_EEPROM:
  88		req = MT_VEND_READ_EEPROM;
  89		break;
  90	case MT_VEND_TYPE_CFG:
  91		req = MT_VEND_READ_CFG;
  92		break;
  93	default:
  94		req = MT_VEND_MULTI_READ;
  95		break;
  96	}
  97
  98	return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
  99}
 100
 101static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
 102{
 103	u32 ret;
 104
 105	mutex_lock(&dev->usb.usb_ctrl_mtx);
 106	ret = __mt76u_rr(dev, addr);
 107	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 108
 109	return ret;
 110}
 111
 112static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
 113{
 114	u32 ret;
 115
 116	mutex_lock(&dev->usb.usb_ctrl_mtx);
 117	ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
 118	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 119
 120	return ret;
 121}
 122
 123static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
 124			u32 addr, u32 val)
 125{
 126	struct mt76_usb *usb = &dev->usb;
 127
 128	put_unaligned_le32(val, usb->data);
 129	__mt76u_vendor_request(dev, req,
 130			       USB_DIR_OUT | USB_TYPE_VENDOR,
 131			       addr >> 16, addr, usb->data,
 132			       sizeof(__le32));
 133	trace_usb_reg_wr(dev, addr, val);
 134}
 135
 136static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
 137{
 138	u8 req;
 139
 140	switch (addr & MT_VEND_TYPE_MASK) {
 141	case MT_VEND_TYPE_CFG:
 142		req = MT_VEND_WRITE_CFG;
 143		break;
 144	default:
 145		req = MT_VEND_MULTI_WRITE;
 146		break;
 147	}
 148	___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
 149}
 150
 151static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
 152{
 153	mutex_lock(&dev->usb.usb_ctrl_mtx);
 154	__mt76u_wr(dev, addr, val);
 155	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 156}
 157
 158static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
 159{
 160	mutex_lock(&dev->usb.usb_ctrl_mtx);
 161	___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
 162	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 163}
 164
 165static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
 166		     u32 mask, u32 val)
 167{
 168	mutex_lock(&dev->usb.usb_ctrl_mtx);
 169	val |= __mt76u_rr(dev, addr) & ~mask;
 170	__mt76u_wr(dev, addr, val);
 171	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 172
 173	return val;
 174}
 175
 176static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
 177			 u32 mask, u32 val)
 178{
 179	mutex_lock(&dev->usb.usb_ctrl_mtx);
 180	val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
 181	___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
 182	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 183
 184	return val;
 185}
 186
 187static void mt76u_copy(struct mt76_dev *dev, u32 offset,
 188		       const void *data, int len)
 189{
 190	struct mt76_usb *usb = &dev->usb;
 191	const u8 *val = data;
 192	int ret;
 193	int current_batch_size;
 194	int i = 0;
 195
 196	/* Ensure that a multiple of 4 bytes is always copied,
 197	 * otherwise beacons can be corrupted.
 198	 * See: "mt76: round up length on mt76_wr_copy"
 199	 * Commit 850e8f6fbd5d0003b0
 200	 */
 201	len = round_up(len, 4);
 202
 203	mutex_lock(&usb->usb_ctrl_mtx);
 204	while (i < len) {
 205		current_batch_size = min_t(int, usb->data_len, len - i);
 206		memcpy(usb->data, val + i, current_batch_size);
 207		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
 208					     USB_DIR_OUT | USB_TYPE_VENDOR,
 209					     0, offset + i, usb->data,
 210					     current_batch_size);
 211		if (ret < 0)
 212			break;
 213
 214		i += current_batch_size;
 215	}
 216	mutex_unlock(&usb->usb_ctrl_mtx);
 217}
 218
 219static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
 220			   const void *data, int len)
 221{
 222	struct mt76_usb *usb = &dev->usb;
 223	int ret, i = 0, batch_len;
 224	const u8 *val = data;
 225
 226	len = round_up(len, 4);
 227	mutex_lock(&usb->usb_ctrl_mtx);
 228	while (i < len) {
 229		batch_len = min_t(int, usb->data_len, len - i);
 230		memcpy(usb->data, val + i, batch_len);
 231		ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
 232					     USB_DIR_OUT | USB_TYPE_VENDOR,
 233					     (offset + i) >> 16, offset + i,
 234					     usb->data, batch_len);
 235		if (ret < 0)
 236			break;
 237
 238		i += batch_len;
 239	}
 240	mutex_unlock(&usb->usb_ctrl_mtx);
 241}
 242
 243static void
 244mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
 245		    void *data, int len)
 246{
 247	struct mt76_usb *usb = &dev->usb;
 248	int i = 0, batch_len, ret;
 249	u8 *val = data;
 250
 251	len = round_up(len, 4);
 252	mutex_lock(&usb->usb_ctrl_mtx);
 253	while (i < len) {
 254		batch_len = min_t(int, usb->data_len, len - i);
 255		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
 256					     USB_DIR_IN | USB_TYPE_VENDOR,
 257					     (offset + i) >> 16, offset + i,
 258					     usb->data, batch_len);
 259		if (ret < 0)
 260			break;
 261
 262		memcpy(val + i, usb->data, batch_len);
 263		i += batch_len;
 264	}
 265	mutex_unlock(&usb->usb_ctrl_mtx);
 266}
 267
 268void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
 269		     const u16 offset, const u32 val)
 270{
 271	mutex_lock(&dev->usb.usb_ctrl_mtx);
 272	__mt76u_vendor_request(dev, req,
 273			       USB_DIR_OUT | USB_TYPE_VENDOR,
 274			       val & 0xffff, offset, NULL, 0);
 275	__mt76u_vendor_request(dev, req,
 276			       USB_DIR_OUT | USB_TYPE_VENDOR,
 277			       val >> 16, offset + 2, NULL, 0);
 278	mutex_unlock(&dev->usb.usb_ctrl_mtx);
 279}
 280EXPORT_SYMBOL_GPL(mt76u_single_wr);
 281
 282static int
 283mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
 284		const struct mt76_reg_pair *data, int len)
 285{
 286	struct mt76_usb *usb = &dev->usb;
 287
 288	mutex_lock(&usb->usb_ctrl_mtx);
 289	while (len > 0) {
 290		__mt76u_wr(dev, base + data->reg, data->value);
 291		len--;
 292		data++;
 293	}
 294	mutex_unlock(&usb->usb_ctrl_mtx);
 295
 296	return 0;
 297}
 298
 299static int
 300mt76u_wr_rp(struct mt76_dev *dev, u32 base,
 301	    const struct mt76_reg_pair *data, int n)
 302{
 303	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
 304		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
 305	else
 306		return mt76u_req_wr_rp(dev, base, data, n);
 307}
 308
 309static int
 310mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
 311		int len)
 312{
 313	struct mt76_usb *usb = &dev->usb;
 314
 315	mutex_lock(&usb->usb_ctrl_mtx);
 316	while (len > 0) {
 317		data->value = __mt76u_rr(dev, base + data->reg);
 318		len--;
 319		data++;
 320	}
 321	mutex_unlock(&usb->usb_ctrl_mtx);
 322
 323	return 0;
 324}
 325
 326static int
 327mt76u_rd_rp(struct mt76_dev *dev, u32 base,
 328	    struct mt76_reg_pair *data, int n)
 329{
 330	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
 331		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
 332	else
 333		return mt76u_req_rd_rp(dev, base, data, n);
 334}
 335
 336static bool mt76u_check_sg(struct mt76_dev *dev)
 337{
 338	struct usb_interface *uintf = to_usb_interface(dev->dev);
 339	struct usb_device *udev = interface_to_usbdev(uintf);
 340
 341	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
 342		(udev->bus->no_sg_constraint ||
 343		 udev->speed == USB_SPEED_WIRELESS));
 344}
 345
 346static int
 347mt76u_set_endpoints(struct usb_interface *intf,
 348		    struct mt76_usb *usb)
 349{
 350	struct usb_host_interface *intf_desc = intf->cur_altsetting;
 351	struct usb_endpoint_descriptor *ep_desc;
 352	int i, in_ep = 0, out_ep = 0;
 353
 354	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
 355		ep_desc = &intf_desc->endpoint[i].desc;
 356
 357		if (usb_endpoint_is_bulk_in(ep_desc) &&
 358		    in_ep < __MT_EP_IN_MAX) {
 359			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
 360			in_ep++;
 361		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
 362			   out_ep < __MT_EP_OUT_MAX) {
 363			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
 364			out_ep++;
 365		}
 366	}
 367
 368	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
 369		return -EINVAL;
 370	return 0;
 371}
 372
 373static int
 374mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
 375		 int nsgs, gfp_t gfp)
 376{
 377	int i;
 378
 379	for (i = 0; i < nsgs; i++) {
 380		struct page *page;
 381		void *data;
 382		int offset;
 383
 384		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 385		if (!data)
 386			break;
 387
 388		page = virt_to_head_page(data);
 389		offset = data - page_address(page);
 390		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
 391	}
 392
 393	if (i < nsgs) {
 394		int j;
 395
 396		for (j = nsgs; j < urb->num_sgs; j++)
 397			skb_free_frag(sg_virt(&urb->sg[j]));
 398		urb->num_sgs = i;
 399	}
 400
 401	urb->num_sgs = max_t(int, i, urb->num_sgs);
 402	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
 403	sg_init_marker(urb->sg, urb->num_sgs);
 404
 405	return i ? : -ENOMEM;
 406}
 407
 408static int
 409mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
 410		struct urb *urb, int nsgs, gfp_t gfp)
 411{
 412	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
 413
 414	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
 415		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
 416
 417	urb->transfer_buffer_length = q->buf_size;
 418	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
 419
 420	return urb->transfer_buffer ? 0 : -ENOMEM;
 421}
 422
 423static int
 424mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
 425		int sg_max_size)
 426{
 427	unsigned int size = sizeof(struct urb);
 428
 429	if (dev->usb.sg_en)
 430		size += sg_max_size * sizeof(struct scatterlist);
 431
 432	e->urb = kzalloc(size, GFP_KERNEL);
 433	if (!e->urb)
 434		return -ENOMEM;
 435
 436	usb_init_urb(e->urb);
 437
 438	if (dev->usb.sg_en && sg_max_size > 0)
 439		e->urb->sg = (struct scatterlist *)(e->urb + 1);
 440
 441	return 0;
 442}
 443
 444static int
 445mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
 446		   struct mt76_queue_entry *e)
 447{
 448	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
 449	int err, sg_size;
 450
 451	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
 452	err = mt76u_urb_alloc(dev, e, sg_size);
 453	if (err)
 454		return err;
 455
 456	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
 457}
 458
 459static void mt76u_urb_free(struct urb *urb)
 460{
 461	int i;
 462
 463	for (i = 0; i < urb->num_sgs; i++)
 464		skb_free_frag(sg_virt(&urb->sg[i]));
 465
 466	if (urb->transfer_buffer)
 467		skb_free_frag(urb->transfer_buffer);
 468
 469	usb_free_urb(urb);
 470}
 471
 472static void
 473mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
 474		    struct urb *urb, usb_complete_t complete_fn,
 475		    void *context)
 476{
 477	struct usb_interface *uintf = to_usb_interface(dev->dev);
 478	struct usb_device *udev = interface_to_usbdev(uintf);
 479	unsigned int pipe;
 480
 481	if (dir == USB_DIR_IN)
 482		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
 483	else
 484		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
 485
 486	urb->dev = udev;
 487	urb->pipe = pipe;
 488	urb->complete = complete_fn;
 489	urb->context = context;
 490}
 491
 492static struct urb *
 493mt76u_get_next_rx_entry(struct mt76_queue *q)
 494{
 495	struct urb *urb = NULL;
 496	unsigned long flags;
 497
 498	spin_lock_irqsave(&q->lock, flags);
 499	if (q->queued > 0) {
 500		urb = q->entry[q->tail].urb;
 501		q->tail = (q->tail + 1) % q->ndesc;
 502		q->queued--;
 503	}
 504	spin_unlock_irqrestore(&q->lock, flags);
 505
 506	return urb;
 507}
 508
 509static int
 510mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
 511		       u32 data_len)
 512{
 513	u16 dma_len, min_len;
 514
 515	dma_len = get_unaligned_le16(data);
 516	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
 517		return dma_len;
 518
 519	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
 520	if (data_len < min_len || !dma_len ||
 521	    dma_len + MT_DMA_HDR_LEN > data_len ||
 522	    (dma_len & 0x3))
 523		return -EINVAL;
 524	return dma_len;
 525}
 526
 527static struct sk_buff *
 528mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
 529		   int len, int buf_size)
 530{
 531	int head_room, drv_flags = dev->drv->drv_flags;
 532	struct sk_buff *skb;
 533
 534	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
 535	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
 536		struct page *page;
 537
 538		/* slow path, not enough space for data and
 539		 * skb_shared_info
 540		 */
 541		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
 542		if (!skb)
 543			return NULL;
 544
 545		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
 546		data += head_room + MT_SKB_HEAD_LEN;
 547		page = virt_to_head_page(data);
 548		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 549				page, data - page_address(page),
 550				len - MT_SKB_HEAD_LEN, buf_size);
 551
 552		return skb;
 553	}
 554
 555	/* fast path */
 556	skb = build_skb(data, buf_size);
 557	if (!skb)
 558		return NULL;
 559
 560	skb_reserve(skb, head_room);
 561	__skb_put(skb, len);
 562
 563	return skb;
 564}
 565
 566static int
 567mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
 568		       int buf_size)
 569{
 570	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
 571	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
 572	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
 573	struct sk_buff *skb;
 574
 575	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
 576		return 0;
 577
 578	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
 579	if (len < 0)
 580		return 0;
 581
 582	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
 583	data_len = min_t(int, len, data_len - head_room);
 584	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
 585	if (!skb)
 586		return 0;
 587
 588	len -= data_len;
 589	while (len > 0 && nsgs < urb->num_sgs) {
 590		data_len = min_t(int, len, urb->sg[nsgs].length);
 591		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 592				sg_page(&urb->sg[nsgs]),
 593				urb->sg[nsgs].offset, data_len,
 594				buf_size);
 595		len -= data_len;
 596		nsgs++;
 597	}
 598	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
 599
 600	return nsgs;
 601}
 602
 603static void mt76u_complete_rx(struct urb *urb)
 604{
 605	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
 606	struct mt76_queue *q = urb->context;
 607	unsigned long flags;
 608
 609	trace_rx_urb(dev, urb);
 610
 611	switch (urb->status) {
 612	case -ECONNRESET:
 613	case -ESHUTDOWN:
 614	case -ENOENT:
 615	case -EPROTO:
 616		return;
 617	default:
 618		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
 619				    urb->status);
 620		fallthrough;
 621	case 0:
 622		break;
 623	}
 624
 625	spin_lock_irqsave(&q->lock, flags);
 626	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
 627		goto out;
 628
 629	q->head = (q->head + 1) % q->ndesc;
 630	q->queued++;
 631	mt76_worker_schedule(&dev->usb.rx_worker);
 632out:
 633	spin_unlock_irqrestore(&q->lock, flags);
 634}
 635
 636static int
 637mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
 638		    struct urb *urb)
 639{
 640	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;
 641
 642	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
 643			    mt76u_complete_rx, &dev->q_rx[qid]);
 644	trace_submit_urb(dev, urb);
 645
 646	return usb_submit_urb(urb, GFP_ATOMIC);
 647}
 648
 649static void
 650mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 651{
 652	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
 653	struct urb *urb;
 654	int err, count;
 655
 656	while (true) {
 657		urb = mt76u_get_next_rx_entry(q);
 658		if (!urb)
 659			break;
 660
 661		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
 662		if (count > 0) {
 663			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
 664			if (err < 0)
 665				break;
 666		}
 667		mt76u_submit_rx_buf(dev, qid, urb);
 668	}
 669	if (qid == MT_RXQ_MAIN) {
 670		local_bh_disable();
 671		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
 672		local_bh_enable();
 673	}
 674}
 675
 676static void mt76u_rx_worker(struct mt76_worker *w)
 677{
 678	struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker);
 679	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
 680	int i;
 681
 682	rcu_read_lock();
 683	mt76_for_each_q_rx(dev, i)
 684		mt76u_process_rx_queue(dev, &dev->q_rx[i]);
 685	rcu_read_unlock();
 686}
 687
 688static int
 689mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
 690{
 691	struct mt76_queue *q = &dev->q_rx[qid];
 692	unsigned long flags;
 693	int i, err = 0;
 694
 695	spin_lock_irqsave(&q->lock, flags);
 696	for (i = 0; i < q->ndesc; i++) {
 697		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
 698		if (err < 0)
 699			break;
 700	}
 701	q->head = q->tail = 0;
 702	q->queued = 0;
 703	spin_unlock_irqrestore(&q->lock, flags);
 704
 705	return err;
 706}
 707
 708static int
 709mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
 710{
 711	struct mt76_queue *q = &dev->q_rx[qid];
 712	int i, err;
 713
 714	spin_lock_init(&q->lock);
 715	q->entry = devm_kcalloc(dev->dev,
 716				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
 717				GFP_KERNEL);
 718	if (!q->entry)
 719		return -ENOMEM;
 720
 721	q->ndesc = MT_NUM_RX_ENTRIES;
 722	q->buf_size = PAGE_SIZE;
 723
 724	for (i = 0; i < q->ndesc; i++) {
 725		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
 726		if (err < 0)
 727			return err;
 728	}
 729
 730	return mt76u_submit_rx_buffers(dev, qid);
 731}
 732
 733int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
 734{
 735	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
 736}
 737EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
 738
 739static void
 740mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 741{
 742	struct page *page;
 743	int i;
 744
 745	for (i = 0; i < q->ndesc; i++) {
 746		if (!q->entry[i].urb)
 747			continue;
 748
 749		mt76u_urb_free(q->entry[i].urb);
 750		q->entry[i].urb = NULL;
 751	}
 752
 753	if (!q->rx_page.va)
 754		return;
 755
 756	page = virt_to_page(q->rx_page.va);
 757	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
 758	memset(&q->rx_page, 0, sizeof(q->rx_page));
 759}
 760
 761static void mt76u_free_rx(struct mt76_dev *dev)
 762{
 763	int i;
 764
 765	mt76_worker_teardown(&dev->usb.rx_worker);
 766
 767	mt76_for_each_q_rx(dev, i)
 768		mt76u_free_rx_queue(dev, &dev->q_rx[i]);
 769}
 770
 771void mt76u_stop_rx(struct mt76_dev *dev)
 772{
 773	int i;
 774
 775	mt76_worker_disable(&dev->usb.rx_worker);
 776
 777	mt76_for_each_q_rx(dev, i) {
 778		struct mt76_queue *q = &dev->q_rx[i];
 779		int j;
 780
 781		for (j = 0; j < q->ndesc; j++)
 782			usb_poison_urb(q->entry[j].urb);
 783	}
 784}
 785EXPORT_SYMBOL_GPL(mt76u_stop_rx);
 786
 787int mt76u_resume_rx(struct mt76_dev *dev)
 788{
 789	int i;
 790
 791	mt76_for_each_q_rx(dev, i) {
 792		struct mt76_queue *q = &dev->q_rx[i];
 793		int err, j;
 794
 795		for (j = 0; j < q->ndesc; j++)
 796			usb_unpoison_urb(q->entry[j].urb);
 797
 798		err = mt76u_submit_rx_buffers(dev, i);
 799		if (err < 0)
 800			return err;
 801	}
 802
 803	mt76_worker_enable(&dev->usb.rx_worker);
 804
 805	return 0;
 806}
 807EXPORT_SYMBOL_GPL(mt76u_resume_rx);
 808
 809static void mt76u_status_worker(struct mt76_worker *w)
 810{
 811	struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker);
 812	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
 813	struct mt76_queue_entry entry;
 814	struct mt76_queue *q;
 815	int i;
 816
 817	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 818		q = dev->phy.q_tx[i];
 819		if (!q)
 820			continue;
 821
 822		while (q->queued > 0) {
 823			if (!q->entry[q->tail].done)
 824				break;
 825
 826			entry = q->entry[q->tail];
 827			q->entry[q->tail].done = false;
 828
 829			mt76_queue_tx_complete(dev, q, &entry);
 830		}
 831
 832		if (!q->queued)
 833			wake_up(&dev->tx_wait);
 834
 835		mt76_worker_schedule(&dev->tx_worker);
 836
 837		if (dev->drv->tx_status_data &&
 838		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
 839			queue_work(dev->wq, &dev->usb.stat_work);
 840	}
 841}
 842
 843static void mt76u_tx_status_data(struct work_struct *work)
 844{
 845	struct mt76_usb *usb;
 846	struct mt76_dev *dev;
 847	u8 update = 1;
 848	u16 count = 0;
 849
 850	usb = container_of(work, struct mt76_usb, stat_work);
 851	dev = container_of(usb, struct mt76_dev, usb);
 852
 853	while (true) {
 854		if (test_bit(MT76_REMOVED, &dev->phy.state))
 855			break;
 856
 857		if (!dev->drv->tx_status_data(dev, &update))
 858			break;
 859		count++;
 860	}
 861
 862	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
 863		queue_work(dev->wq, &usb->stat_work);
 864	else
 865		clear_bit(MT76_READING_STATS, &dev->phy.state);
 866}
 867
 868static void mt76u_complete_tx(struct urb *urb)
 869{
 870	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
 871	struct mt76_queue_entry *e = urb->context;
 872
 873	if (mt76u_urb_error(urb))
 874		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
 875	e->done = true;
 876
 877	mt76_worker_schedule(&dev->usb.status_worker);
 878}
 879
 880static int
 881mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
 882		       struct urb *urb)
 883{
 884	urb->transfer_buffer_length = skb->len;
 885
 886	if (!dev->usb.sg_en) {
 887		urb->transfer_buffer = skb->data;
 888		return 0;
 889	}
 890
 891	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
 892	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
 893	if (!urb->num_sgs)
 894		return -ENOMEM;
 895
 896	return urb->num_sgs;
 897}
 898
 899static int
 900mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 901		   struct sk_buff *skb, struct mt76_wcid *wcid,
 902		   struct ieee80211_sta *sta)
 903{
 904	struct mt76_tx_info tx_info = {
 905		.skb = skb,
 906	};
 907	u16 idx = q->head;
 908	int err;
 909
 910	if (q->queued == q->ndesc)
 911		return -ENOSPC;
 912
 913	skb->prev = skb->next = NULL;
 914	err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
 915	if (err < 0)
 916		return err;
 917
 918	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
 919	if (err < 0)
 920		return err;
 921
 922	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
 923			    q->entry[idx].urb, mt76u_complete_tx,
 924			    &q->entry[idx]);
 925
 926	q->head = (q->head + 1) % q->ndesc;
 927	q->entry[idx].skb = tx_info.skb;
 928	q->entry[idx].wcid = 0xffff;
 929	q->queued++;
 930
 931	return idx;
 932}
 933
 934static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
 935{
 936	struct urb *urb;
 937	int err;
 938
 939	while (q->first != q->head) {
 940		urb = q->entry[q->first].urb;
 941
 942		trace_submit_urb(dev, urb);
 943		err = usb_submit_urb(urb, GFP_ATOMIC);
 944		if (err < 0) {
 945			if (err == -ENODEV)
 946				set_bit(MT76_REMOVED, &dev->phy.state);
 947			else
 948				dev_err(dev->dev, "tx urb submit failed:%d\n",
 949					err);
 950			break;
 951		}
 952		q->first = (q->first + 1) % q->ndesc;
 953	}
 954}
 955
 956static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
 957{
 958	if (mt76_chip(dev) == 0x7663) {
 959		static const u8 lmac_queue_map[] = {
 960			/* ac to lmac mapping */
 961			[IEEE80211_AC_BK] = 0,
 962			[IEEE80211_AC_BE] = 1,
 963			[IEEE80211_AC_VI] = 2,
 964			[IEEE80211_AC_VO] = 4,
 965		};
 966
 967		if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
 968			return 1; /* BE */
 969
 970		return lmac_queue_map[ac];
 971	}
 972
 973	return mt76_ac_to_hwq(ac);
 974}
 975
 976static int mt76u_alloc_tx(struct mt76_dev *dev)
 977{
 978	struct mt76_queue *q;
 979	int i, j, err;
 980
 981	for (i = 0; i <= MT_TXQ_PSD; i++) {
 982		if (i >= IEEE80211_NUM_ACS) {
 983			dev->phy.q_tx[i] = dev->phy.q_tx[0];
 984			continue;
 985		}
 986
 987		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
 988		if (!q)
 989			return -ENOMEM;
 990
 991		spin_lock_init(&q->lock);
 992		q->hw_idx = mt76u_ac_to_hwq(dev, i);
 993		q->qid = i;
 994
 995		dev->phy.q_tx[i] = q;
 996
 997		q->entry = devm_kcalloc(dev->dev,
 998					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
 999					GFP_KERNEL);
1000		if (!q->entry)
1001			return -ENOMEM;
1002
1003		q->ndesc = MT_NUM_TX_ENTRIES;
1004		for (j = 0; j < q->ndesc; j++) {
1005			err = mt76u_urb_alloc(dev, &q->entry[j],
1006					      MT_TX_SG_MAX_SIZE);
1007			if (err < 0)
1008				return err;
1009		}
1010	}
1011	return 0;
1012}
1013
1014static void mt76u_free_tx(struct mt76_dev *dev)
1015{
1016	int i;
1017
1018	mt76_worker_teardown(&dev->usb.status_worker);
1019
1020	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
1021		struct mt76_queue *q;
1022		int j;
1023
1024		q = dev->phy.q_tx[i];
1025		if (!q)
1026			continue;
1027
1028		for (j = 0; j < q->ndesc; j++) {
1029			usb_free_urb(q->entry[j].urb);
1030			q->entry[j].urb = NULL;
1031		}
1032	}
1033}
1034
1035void mt76u_stop_tx(struct mt76_dev *dev)
1036{
1037	int ret;
1038
1039	mt76_worker_disable(&dev->usb.status_worker);
1040
1041	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
1042				 HZ / 5);
1043	if (!ret) {
1044		struct mt76_queue_entry entry;
1045		struct mt76_queue *q;
1046		int i, j;
1047
1048		dev_err(dev->dev, "timed out waiting for pending tx\n");
1049
1050		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
1051			q = dev->phy.q_tx[i];
1052			if (!q)
1053				continue;
1054
1055			for (j = 0; j < q->ndesc; j++)
1056				usb_kill_urb(q->entry[j].urb);
1057		}
1058
1059		mt76_worker_disable(&dev->tx_worker);
1060
1061		/* On device removal we might queue skbs, but mt76u_tx_kick()
1062		 * will fail to submit the urb; clean up those skbs manually.
1063		 */
1064		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
1065			q = dev->phy.q_tx[i];
1066			if (!q)
1067				continue;
1068
1069			while (q->queued > 0) {
1070				entry = q->entry[q->tail];
1071				q->entry[q->tail].done = false;
1072				mt76_queue_tx_complete(dev, q, &entry);
1073			}
1074		}
1075
1076		mt76_worker_enable(&dev->tx_worker);
1077	}
1078
1079	cancel_work_sync(&dev->usb.stat_work);
1080	clear_bit(MT76_READING_STATS, &dev->phy.state);
1081
1082	mt76_worker_enable(&dev->usb.status_worker);
1083
1084	mt76_tx_status_check(dev, NULL, true);
1085}
1086EXPORT_SYMBOL_GPL(mt76u_stop_tx);
1087
1088void mt76u_queues_deinit(struct mt76_dev *dev)
1089{
1090	mt76u_stop_rx(dev);
1091	mt76u_stop_tx(dev);
1092
1093	mt76u_free_rx(dev);
1094	mt76u_free_tx(dev);
1095}
1096EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
1097
1098int mt76u_alloc_queues(struct mt76_dev *dev)
1099{
1100	int err;
1101
1102	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
1103	if (err < 0)
1104		return err;
1105
1106	return mt76u_alloc_tx(dev);
1107}
1108EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
1109
1110static const struct mt76_queue_ops usb_queue_ops = {
1111	.tx_queue_skb = mt76u_tx_queue_skb,
1112	.kick = mt76u_tx_kick,
1113};
1114
1115int mt76u_init(struct mt76_dev *dev,
1116	       struct usb_interface *intf, bool ext)
1117{
1118	static struct mt76_bus_ops mt76u_ops = {
1119		.read_copy = mt76u_read_copy_ext,
1120		.wr_rp = mt76u_wr_rp,
1121		.rd_rp = mt76u_rd_rp,
1122		.type = MT76_BUS_USB,
1123	};
1124	struct usb_device *udev = interface_to_usbdev(intf);
1125	struct mt76_usb *usb = &dev->usb;
1126	int err;
1127
1128	mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
1129	mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
1130	mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
1131	mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;
1132
1133	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
1134
1135	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
1136	if (usb->data_len < 32)
1137		usb->data_len = 32;
1138
1139	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
1140	if (!usb->data)
1141		return -ENOMEM;
1142
1143	mutex_init(&usb->usb_ctrl_mtx);
1144	dev->bus = &mt76u_ops;
1145	dev->queue_ops = &usb_queue_ops;
1146
1147	dev_set_drvdata(&udev->dev, dev);
1148
1149	usb->sg_en = mt76u_check_sg(dev);
1150
1151	err = mt76u_set_endpoints(intf, usb);
1152	if (err < 0)
1153		return err;
1154
1155	err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker,
1156				"usb-rx");
1157	if (err)
1158		return err;
1159
1160	err = mt76_worker_setup(dev->hw, &usb->status_worker,
1161				mt76u_status_worker, "usb-status");
1162	if (err)
1163		return err;
1164
1165	sched_set_fifo_low(usb->rx_worker.task);
1166	sched_set_fifo_low(usb->status_worker.task);
1167
1168	return 0;
1169}
1170EXPORT_SYMBOL_GPL(mt76u_init);
1171
1172MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
1173MODULE_LICENSE("Dual BSD/GPL");