/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
	Module: rt2x00usb
	Abstract: rt2x00 generic usb device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/bug.h>

#include "rt2x00.h"
#include "rt2x00usb.h"

/*
 * Interfacing with the HW.
 */
int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
			     const u8 request, const u8 requesttype,
			     const u16 offset, const u16 value,
			     void *buffer, const u16 buffer_length,
			     const int timeout)
{
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	int status;
	unsigned int i;
	unsigned int pipe =
	    (requesttype == USB_VENDOR_REQUEST_IN) ?
	    usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return -ENODEV;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		status = usb_control_msg(usb_dev, pipe, request, requesttype,
					 value, offset, buffer, buffer_length,
					 timeout);
		if (status >= 0)
			return 0;

		/*
		 * Check for errors
		 * -ENODEV: Device has disappeared, no point continuing.
		 * All other errors: Try again.
		 */
		else if (status == -ENODEV) {
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
			break;
		}
	}

	rt2x00_err(rt2x00dev,
		   "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
		   request, offset, status);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);

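/*
 * Vendor request which bounces the data through the pre-allocated CSR
 * cache: OUT data is copied into the cache before the transfer, IN data
 * is copied back to the caller on success. csr_mutex must be held.
 */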
int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
				   const u8 request, const u8 requesttype,
				   const u16 offset, void *buffer,
				   const u16 buffer_length, const int timeout)
{
	int status;

	BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex));

	/*
	 * Check for cache availability.
	 */
	if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
		rt2x00_err(rt2x00dev, "CSR cache not available\n");
		return -ENOMEM;
	}

	if (requesttype == USB_VENDOR_REQUEST_OUT)
		memcpy(rt2x00dev->csr.cache, buffer, buffer_length);

	status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
					  offset, 0, rt2x00dev->csr.cache,
					  buffer_length, timeout);

	if (!status && requesttype == USB_VENDOR_REQUEST_IN)
		memcpy(buffer, rt2x00dev->csr.cache, buffer_length);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);

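/*
 * Vendor request for buffers of arbitrary size: the transfer is split
 * into chunks of at most CSR_CACHE_SIZE bytes, each performed under
 * csr_mutex through rt2x00usb_vendor_req_buff_lock().
 */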
int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
				  const u8 request, const u8 requesttype,
				  const u16 offset, void *buffer,
				  const u16 buffer_length, const int timeout)
{
	int status = 0;
	unsigned char *tb;
	u16 off, len, bsize;

	mutex_lock(&rt2x00dev->csr_mutex);

	tb  = (char *)buffer;
	off = offset;
	len = buffer_length;
	while (len && !status) {
		bsize = min_t(u16, CSR_CACHE_SIZE, len);
		status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
							requesttype, off, tb,
							bsize, timeout);

		tb  += bsize;
		len -= bsize;
		off += bsize;
	}

	mutex_unlock(&rt2x00dev->csr_mutex);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);

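/*
 * Poll an indirect register until the given busy field clears. Returns 1
 * when the register became available, 0 when the device kept signalling
 * busy for REGISTER_BUSY_COUNT reads (in which case *reg is set to ~0).
 */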
int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
			   const unsigned int offset,
			   const struct rt2x00_field32 field,
			   u32 *reg)
{
	unsigned int i;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return -ENODEV;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2x00usb_register_read_lock(rt2x00dev, offset, reg);
		if (!rt2x00_get_field32(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n",
		   offset, *reg);
	*reg = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);

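/*
 * Context for an asynchronous register read. The completion callback
 * receives the urb status and the register value; when it returns true
 * the urb is resubmitted to read the register again.
 */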
struct rt2x00_async_read_data {
	__le32 reg;
	struct usb_ctrlrequest cr;
	struct rt2x00_dev *rt2x00dev;
	bool (*callback)(struct rt2x00_dev *, int, u32);
};

static void rt2x00usb_register_read_async_cb(struct urb *urb)
{
	struct rt2x00_async_read_data *rd = urb->context;

	if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
		if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
			kfree(rd);
	} else
		kfree(rd);
}

void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
				   const unsigned int offset,
				   bool (*callback)(struct rt2x00_dev *, int, u32))
{
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct urb *urb;
	struct rt2x00_async_read_data *rd;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (!rd)
		return;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		kfree(rd);
		return;
	}

	rd->rt2x00dev = rt2x00dev;
	rd->callback = callback;
	rd->cr.bRequestType = USB_VENDOR_REQUEST_IN;
	rd->cr.bRequest = USB_MULTI_READ;
	rd->cr.wValue = 0;
	rd->cr.wIndex = cpu_to_le16(offset);
	rd->cr.wLength = cpu_to_le16(sizeof(u32));

	usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			     (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
			     rt2x00usb_register_read_async_cb, rd);
	if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
		kfree(rd);
	usb_free_urb(urb);
}
EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);

/*
 * TX data handlers.
 */
static void rt2x00usb_work_txdone_entry(struct queue_entry *entry)
{
	/*
	 * If the transfer to hardware succeeded, it does not mean the
	 * frame was sent out correctly. It only means the frame was
	 * successfully pushed to the hardware; we have no way to
	 * determine the transmission status right now. (Only indirectly,
	 * by looking at the failed TX counters in the register.)
	 */
	if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
		rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
	else
		rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
}

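/*
 * Worker for txdone_work: walk every TX queue from the Q_INDEX_DONE
 * side and complete each entry that is no longer owned by the device
 * and still has its status pending.
 */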
static void rt2x00usb_work_txdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(work, struct rt2x00_dev, txdone_work);
	struct data_queue *queue;
	struct queue_entry *entry;

	tx_queue_for_each(rt2x00dev, queue) {
		while (!rt2x00queue_empty(queue)) {
			entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);

			if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
			    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
				break;

			rt2x00usb_work_txdone_entry(entry);
		}
	}
}

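/*
 * urb completion handler for TX: report the frame as DMA done, then
 * schedule the txdone worker, unless the driver reads TX status from a
 * FIFO which does not hold any entries yet.
 */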
static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	/*
	 * Check if the frame was correctly uploaded
	 */
	if (urb->status)
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);

	/*
	 * Report the frame as DMA done
	 */
	rt2x00lib_dmadone(entry);

	if (rt2x00dev->ops->lib->tx_dma_done)
		rt2x00dev->ops->lib->tx_dma_done(entry);

	/*
	 * Schedule the delayed work for reading the TX status
	 * from the device.
	 */
	if (!test_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags) ||
	    !kfifo_is_empty(&rt2x00dev->txstatus_fifo))
		queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}

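/*
 * Fill and submit the bulk-out urb for a pending TX entry. The frame is
 * padded to the length requested by the driver before being handed to
 * the USB layer; any submission failure is flagged as an I/O error.
 */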
static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	u32 length;
	int status;

	if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
	    test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
		return false;

	/*
	 * USB devices require certain padding at the end of each frame
	 * and urb. This padding is not included in the skb. Pass the
	 * entry to the driver to determine what the overall length
	 * should be.
	 */
	length = rt2x00dev->ops->lib->get_tx_data_len(entry);

	status = skb_padto(entry->skb, length);
	if (unlikely(status)) {
		/* TODO: report something more appropriate than IO_FAILED. */
		rt2x00_warn(rt2x00dev, "TX SKB padding error, out of memory\n");
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);

		return false;
	}

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
			  entry->skb->data, length,
			  rt2x00usb_interrupt_txdone, entry);

	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
	if (status) {
		if (status == -ENODEV)
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);
	}

	return false;
}

/*
 * RX data handlers.
 */
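/*
 * Worker for rxdone_work: hand every finished RX entry, together with
 * its descriptor information, to rt2x00lib for further processing.
 */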
static void rt2x00usb_work_rxdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(work, struct rt2x00_dev, rxdone_work);
	struct queue_entry *entry;
	struct skb_frame_desc *skbdesc;
	u8 rxd[32];

	while (!rt2x00queue_empty(rt2x00dev->rx)) {
		entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);

		if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
		    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
			break;

		/*
		 * Fill in desc fields of the skb descriptor
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		skbdesc->desc = rxd;
		skbdesc->desc_len = entry->queue->desc_size;

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(entry, GFP_KERNEL);
	}
}

static void rt2x00usb_interrupt_rxdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	/*
	 * Report the frame as DMA done
	 */
	rt2x00lib_dmadone(entry);

	/*
	 * Check if the received data is simply too small
	 * to be actually valid, or if the urb is signaling
	 * a problem.
	 */
	if (urb->actual_length < entry->queue->desc_size || urb->status)
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);

	/*
	 * Schedule the delayed work for reading the RX status
	 * from the device.
	 */
	queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}

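/*
 * (Re)submit the bulk-in urb for an RX entry so the device can fill it
 * with a new frame.
 */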
static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	int status;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
	    test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
		return false;

	rt2x00lib_dmastart(entry);

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint),
			  entry->skb->data, entry->skb->len,
			  rt2x00usb_interrupt_rxdone, entry);

	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
	if (status) {
		if (status == -ENODEV)
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);
	}

	return false;
}

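/*
 * Kick a queue: submit all pending TX entries, or refill the RX queue
 * with fresh read urbs.
 */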
void rt2x00usb_kick_queue(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		if (!rt2x00queue_empty(queue))
			rt2x00queue_for_each_entry(queue,
						   Q_INDEX_DONE,
						   Q_INDEX,
						   NULL,
						   rt2x00usb_kick_tx_entry);
		break;
	case QID_RX:
		if (!rt2x00queue_full(queue))
			rt2x00queue_for_each_entry(queue,
						   Q_INDEX,
						   Q_INDEX_DONE,
						   NULL,
						   rt2x00usb_kick_rx_entry);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);

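/*
 * Kill the urb (and guardian urb, if any) of an entry that is still
 * owned by the device, so the queue can be flushed.
 */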
static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;

	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return false;

	usb_kill_urb(entry_priv->urb);

	/*
	 * Kill guardian urb (if required by driver).
	 */
	if ((entry->queue->qid == QID_BEACON) &&
	    (test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags)))
		usb_kill_urb(bcn_priv->guardian_urb);

	return false;
}

void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
{
	struct work_struct *completion;
	unsigned int i;

	if (drop)
		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
					   rt2x00usb_flush_entry);

	/*
	 * Obtain the queue completion handler
	 */
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		completion = &queue->rt2x00dev->txdone_work;
		break;
	case QID_RX:
		completion = &queue->rt2x00dev->rxdone_work;
		break;
	default:
		return;
	}

	for (i = 0; i < 10; i++) {
		/*
		 * Check if the driver is already done, otherwise we
		 * have to sleep a little while to give the driver/hw
		 * the opportunity to complete the interrupt processing
		 * itself.
		 */
		if (rt2x00queue_empty(queue))
			break;

		/*
		 * Schedule the completion handler manually; when this
		 * worker function runs, it should clean up the queue.
		 */
		queue_work(queue->rt2x00dev->workqueue, completion);

		/*
		 * Wait for a little while to give the driver
		 * the opportunity to recover itself.
		 */
		msleep(10);
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);

static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
{
	rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
		    queue->qid);

	rt2x00queue_stop_queue(queue);
	rt2x00queue_flush_queue(queue, true);
	rt2x00queue_start_queue(queue);
}

static int rt2x00usb_dma_timeout(struct data_queue *queue)
{
	struct queue_entry *entry;

	entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
	return rt2x00queue_dma_timeout(entry);
}

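/*
 * Watchdog: check all non-empty TX queues for a DMA timeout and force a
 * queue reset when one is detected.
 */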
void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue) {
		if (!rt2x00queue_empty(queue)) {
			if (rt2x00usb_dma_timeout(queue))
				rt2x00usb_watchdog_tx_dma(queue);
		}
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);

/*
 * Radio handlers
 */
void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
				    REGISTER_TIMEOUT);
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);

/*
 * Device initialization handlers.
 */
void rt2x00usb_clear_entry(struct queue_entry *entry)
{
	entry->flags = 0;

	if (entry->queue->qid == QID_RX)
		rt2x00usb_kick_rx_entry(entry, NULL);
}
EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);

static void rt2x00usb_assign_endpoint(struct data_queue *queue,
				      struct usb_endpoint_descriptor *ep_desc)
{
	struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev);
	int pipe;

	queue->usb_endpoint = usb_endpoint_num(ep_desc);

	if (queue->qid == QID_RX) {
		pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
		queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
	} else {
		pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
		queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
	}

	if (!queue->usb_maxpacket)
		queue->usb_maxpacket = 1;
}

static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
{
	struct usb_interface *intf = to_usb_interface(rt2x00dev->dev);
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	struct data_queue *queue = rt2x00dev->tx;
	struct usb_endpoint_descriptor *tx_ep_desc = NULL;
	unsigned int i;

	/*
	 * Walk through all available endpoints to search for "bulk in"
	 * and "bulk out" endpoints. When we find such endpoints collect
	 * the information we need from the descriptor and assign it
	 * to the queue.
	 */
	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc)) {
			rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   (queue != queue_end(rt2x00dev))) {
			rt2x00usb_assign_endpoint(queue, ep_desc);
			queue = queue_next(queue);

			tx_ep_desc = ep_desc;
		}
	}

	/*
	 * At least 1 endpoint for RX and 1 endpoint for TX must be available.
	 */
	if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) {
		rt2x00_err(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n");
		return -EPIPE;
	}

	/*
	 * It might be possible that not all queues have a dedicated
	 * endpoint. Loop through all TX queues and copy the endpoint
	 * information which we have gathered from already assigned
	 * endpoints.
	 */
	txall_queue_for_each(rt2x00dev, queue) {
		if (!queue->usb_endpoint)
			rt2x00usb_assign_endpoint(queue, tx_ep_desc);
	}

	return 0;
}

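/*
 * Allocate the urbs for all entries of a queue, plus the guardian urbs
 * when the driver requires a beacon guardian byte.
 */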
static int rt2x00usb_alloc_entries(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!entry_priv->urb)
			return -ENOMEM;
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (queue->qid != QID_BEACON ||
	    !test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags))
		return 0;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!bcn_priv->guardian_urb)
			return -ENOMEM;
	}

	return 0;
}

static void rt2x00usb_free_entries(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		usb_kill_urb(entry_priv->urb);
		usb_free_urb(entry_priv->urb);
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (queue->qid != QID_BEACON ||
	    !test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags))
		return;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		usb_kill_urb(bcn_priv->guardian_urb);
		usb_free_urb(bcn_priv->guardian_urb);
	}
}

int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	/*
	 * Find endpoints for each queue
	 */
	status = rt2x00usb_find_endpoints(rt2x00dev);
	if (status)
		goto exit;

	/*
	 * Allocate DMA
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00usb_alloc_entries(queue);
		if (status)
			goto exit;
	}

	return 0;

exit:
	rt2x00usb_uninitialize(rt2x00dev);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_initialize);

void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	queue_for_each(rt2x00dev, queue)
		rt2x00usb_free_entries(queue);
}
EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);

/*
 * USB driver handlers.
 */
static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	kfree(rt2x00dev->csr.cache);
	rt2x00dev->csr.cache = NULL;
}

static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
	if (!rt2x00dev->csr.cache)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	rt2x00_probe_err("Failed to allocate registers\n");

	rt2x00usb_free_reg(rt2x00dev);

	return -ENOMEM;
}

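/*
 * Probe handler shared by all rt2x00 USB drivers: allocate the
 * ieee80211 hardware, set up the register caches and work handlers,
 * and register the device with rt2x00lib.
 */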
int rt2x00usb_probe(struct usb_interface *usb_intf,
		    const struct rt2x00_ops *ops)
{
	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	usb_dev = usb_get_dev(usb_dev);
	usb_reset_device(usb_dev);

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		rt2x00_probe_err("Failed to allocate hardware\n");
		retval = -ENOMEM;
		goto exit_put_device;
	}

	usb_set_intfdata(usb_intf, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = &usb_intf->dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;

	rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);

	INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone);
	INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone);
	hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);

	retval = rt2x00usb_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00usb_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_put_device:
	usb_put_dev(usb_dev);

	usb_set_intfdata(usb_intf, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_probe);

void rt2x00usb_disconnect(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00usb_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the USB device data.
	 */
	usb_set_intfdata(usb_intf, NULL);
	usb_put_dev(interface_to_usbdev(usb_intf));
}
EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);

#ifdef CONFIG_PM
int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00lib_suspend(rt2x00dev, state);
}
EXPORT_SYMBOL_GPL(rt2x00usb_suspend);

int rt2x00usb_resume(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00usb module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 usb library");
MODULE_LICENSE("GPL");