/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 */

#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "usbip_common.h"
#include "stub.h"

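/*
 * The helpers below inspect the SETUP packet of a control URB received from
 * the client and classify the requests that also need to be executed on the
 * exporting host (clear_halt, set_interface, set_configuration, and port
 * reset), so that the local USB core's view of the device stays consistent.
 */
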
static int is_clear_halt_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;

        req = (struct usb_ctrlrequest *) urb->setup_packet;

        return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
                (req->bRequestType == USB_RECIP_ENDPOINT) &&
                (req->wValue == USB_ENDPOINT_HALT);
}

static int is_set_interface_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;

        req = (struct usb_ctrlrequest *) urb->setup_packet;

        return (req->bRequest == USB_REQ_SET_INTERFACE) &&
                (req->bRequestType == USB_RECIP_INTERFACE);
}

static int is_set_configuration_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;

        req = (struct usb_ctrlrequest *) urb->setup_packet;

        return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
                (req->bRequestType == USB_RECIP_DEVICE);
}

static int is_reset_device_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;
        __u16 value;
        __u16 index;

        req = (struct usb_ctrlrequest *) urb->setup_packet;
        value = le16_to_cpu(req->wValue);
        index = le16_to_cpu(req->wIndex);

        if ((req->bRequest == USB_REQ_SET_FEATURE) &&
            (req->bRequestType == USB_RT_PORT) &&
            (value == USB_PORT_FEAT_RESET)) {
                usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
                return 1;
        } else
                return 0;
}

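/*
 * Execute a CLEAR_FEATURE(ENDPOINT_HALT) request locally with
 * usb_clear_halt() so that the exporting host's endpoint state (including
 * the data toggle) is reset along with the device's.
 */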
static int tweak_clear_halt_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;
        int target_endp;
        int target_dir;
        int target_pipe;
        int ret;

        req = (struct usb_ctrlrequest *) urb->setup_packet;

        /*
         * The stalled endpoint is specified in the wIndex value. The endpoint
         * of the urb is the target of this clear_halt request (i.e., control
         * endpoint).
         */
        target_endp = le16_to_cpu(req->wIndex) & 0x000f;

        /* Is the stalled endpoint direction IN or OUT? USB_DIR_IN is 0x80. */
        target_dir = le16_to_cpu(req->wIndex) & 0x0080;

        if (target_dir)
                target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
        else
                target_pipe = usb_sndctrlpipe(urb->dev, target_endp);

        ret = usb_clear_halt(urb->dev, target_pipe);
        if (ret < 0)
                dev_err(&urb->dev->dev,
                        "usb_clear_halt error: devnum %d endp %d ret %d\n",
                        urb->dev->devnum, target_endp, ret);
        else
                dev_info(&urb->dev->dev,
                         "usb_clear_halt done: devnum %d endp %d\n",
                         urb->dev->devnum, target_endp);

        return ret;
}

static int tweak_set_interface_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;
        __u16 alternate;
        __u16 interface;
        int ret;

        req = (struct usb_ctrlrequest *) urb->setup_packet;
        alternate = le16_to_cpu(req->wValue);
        interface = le16_to_cpu(req->wIndex);

        usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
                          interface, alternate);

        ret = usb_set_interface(urb->dev, interface, alternate);
        if (ret < 0)
                dev_err(&urb->dev->dev,
                        "usb_set_interface error: inf %u alt %u ret %d\n",
                        interface, alternate, ret);
        else
                dev_info(&urb->dev->dev,
                         "usb_set_interface done: inf %u alt %u\n",
                         interface, alternate);

        return ret;
}

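/*
 * SET_CONFIGURATION is executed on the exporting host through
 * usb_set_configuration().  Errors other than -ENODEV are only logged and
 * 0 is returned, so the original control URB is still submitted and
 * answered to the client.
 */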
static int tweak_set_configuration_cmd(struct urb *urb)
{
        struct stub_priv *priv = (struct stub_priv *) urb->context;
        struct stub_device *sdev = priv->sdev;
        struct usb_ctrlrequest *req;
        __u16 config;
        int err;

        req = (struct usb_ctrlrequest *) urb->setup_packet;
        config = le16_to_cpu(req->wValue);

        err = usb_set_configuration(sdev->udev, config);
        if (err && err != -ENODEV)
                dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
                        config, err);
        return 0;
}

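/*
 * A SET_FEATURE(PORT_RESET) request aimed at the device's upstream port is
 * turned into a synchronous usb_reset_device() on the exporting host.  The
 * device lock must be obtained with usb_lock_device_for_reset() first; if
 * that fails, the reset is simply skipped.
 */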
static int tweak_reset_device_cmd(struct urb *urb)
{
        struct stub_priv *priv = (struct stub_priv *) urb->context;
        struct stub_device *sdev = priv->sdev;

        dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

        /*
         * With the implementation of pre_reset and post_reset the driver no
         * longer unbinds. This allows the use of synchronous reset.
         */

        if (usb_lock_device_for_reset(sdev->udev, sdev->interface) < 0) {
                dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
                return 0;
        }
        usb_reset_device(sdev->udev);
        usb_unlock_device(sdev->udev);

        return 0;
}

/*
 * clear_halt, set_interface, and set_configuration require special tricks.
 */
static void tweak_special_requests(struct urb *urb)
{
        if (!urb || !urb->setup_packet)
                return;

        if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
                return;

        if (is_clear_halt_cmd(urb))
                /* tweak clear_halt */
                tweak_clear_halt_cmd(urb);

        else if (is_set_interface_cmd(urb))
                /* tweak set_interface */
                tweak_set_interface_cmd(urb);

        else if (is_set_configuration_cmd(urb))
                /* tweak set_configuration */
                tweak_set_configuration_cmd(urb);

        else if (is_reset_device_cmd(urb))
                tweak_reset_device_cmd(urb);
        else
                usbip_dbg_stub_rx("no need to tweak\n");
}

/*
 * stub_recv_cmd_unlink() unlinks the URB by a call to usb_unlink_urb().
 * By unlinking the urb asynchronously, stub_rx can continuously
 * process incoming urbs. Even if the urb is unlinked, its completion
 * handler will be called and stub_tx will send a return pdu.
 *
 * See also comments about unlinking strategy in vhci_hcd.c.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
                                struct usbip_header *pdu)
{
        int ret;
        unsigned long flags;
        struct stub_priv *priv;

        spin_lock_irqsave(&sdev->priv_lock, flags);

        list_for_each_entry(priv, &sdev->priv_init, list) {
                if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
                        continue;

                dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
                         priv->urb);

                /*
                 * This matched urb is not completed yet (i.e., it is still
                 * in flight in the usb hcd hardware/driver). Now we are
                 * cancelling it. The unlinking flag means that we are
                 * now not going to return the normal result pdu of a
                 * submission request, but going to return a result pdu
                 * of the unlink request.
                 */
                priv->unlinking = 1;

                /*
                 * When the unlinking flag is on, priv->seqnum is changed
                 * from the seqnum of the urb being cancelled to the seqnum
                 * of the unlink request. This will be used to make the
                 * result pdu of the unlink request.
                 */
                priv->seqnum = pdu->base.seqnum;

                spin_unlock_irqrestore(&sdev->priv_lock, flags);

                /*
                 * usb_unlink_urb() is called outside the spinlock to avoid
                 * spinlock recursion, since stub_complete() is sometimes
                 * called in this context rather than in interrupt context.
                 * If stub_complete() runs before we call usb_unlink_urb(),
                 * usb_unlink_urb() will return an error value. In this
                 * case, stub_tx will return the result pdu of this unlink
                 * request even though the submission has completed and no
                 * actual unlinking is performed.
                 */
                /*
                 * In that case, urb->status is not -ECONNRESET, so a driver
                 * on the client host can tell that the unlink request
                 * failed.
                 */
                ret = usb_unlink_urb(priv->urb);
                if (ret != -EINPROGRESS)
                        dev_err(&priv->urb->dev->dev,
                                "failed to unlink a urb %p, ret %d\n",
                                priv->urb, ret);

                return 0;
        }

        usbip_dbg_stub_rx("seqnum %d is not pending\n",
                          pdu->u.cmd_unlink.seqnum);

        /*
         * The urb of the unlink target is not found in the priv_init queue.
         * It was already completed and its result is (or was) going to be
         * sent by a CMD_RET pdu. In this case, usb_unlink_urb() is not
         * needed. We only report the completion of this unlink request to
         * vhci_hcd.
         */
        stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return 0;
}

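/*
 * A pdu is only accepted when it is addressed to this exported device
 * (base.devid matches) and the device is currently attached to a client,
 * i.e. its usbip_device status is SDEV_ST_USED.
 */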
static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
        struct usbip_device *ud = &sdev->ud;
        int valid = 0;

        if (pdu->base.devid == sdev->devid) {
                spin_lock_irq(&ud->lock);
                if (ud->status == SDEV_ST_USED) {
                        /* A request is valid. */
                        valid = 1;
                }
                spin_unlock_irq(&ud->lock);
        }

        return valid;
}

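/*
 * Allocate the per-request bookkeeping structure from stub_priv_cache and
 * queue it on sdev->priv_init under priv_lock.  GFP_ATOMIC is used because
 * the allocation happens with the spinlock held; on failure an
 * SDEV_EVENT_ERROR_MALLOC event tears down the session.
 */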
static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
                                         struct usbip_header *pdu)
{
        struct stub_priv *priv;
        struct usbip_device *ud = &sdev->ud;
        unsigned long flags;

        spin_lock_irqsave(&sdev->priv_lock, flags);

        priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
        if (!priv) {
                dev_err(&sdev->interface->dev, "alloc stub_priv\n");
                spin_unlock_irqrestore(&sdev->priv_lock, flags);
                usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
                return NULL;
        }

        priv->seqnum = pdu->base.seqnum;
        priv->sdev = sdev;

        /*
         * After a stub_priv is linked to a list_head,
         * our error handler can free allocated data.
         */
        list_add_tail(&priv->list, &sdev->priv_init);

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return priv;
}

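/*
 * Map the endpoint number and direction from the CMD_SUBMIT header onto a
 * Linux USB "pipe" value for the matching transfer type (control, bulk,
 * interrupt, or isochronous).
 */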
static int get_pipe(struct stub_device *sdev, int epnum, int dir)
{
        struct usb_device *udev = sdev->udev;
        struct usb_host_endpoint *ep;
        struct usb_endpoint_descriptor *epd = NULL;

        if (dir == USBIP_DIR_IN)
                ep = udev->ep_in[epnum & 0x7f];
        else
                ep = udev->ep_out[epnum & 0x7f];
        if (!ep) {
                dev_err(&sdev->interface->dev, "no such endpoint?, %d\n",
                        epnum);
                BUG();
        }

        epd = &ep->desc;
        if (usb_endpoint_xfer_control(epd)) {
                if (dir == USBIP_DIR_OUT)
                        return usb_sndctrlpipe(udev, epnum);
                else
                        return usb_rcvctrlpipe(udev, epnum);
        }

        if (usb_endpoint_xfer_bulk(epd)) {
                if (dir == USBIP_DIR_OUT)
                        return usb_sndbulkpipe(udev, epnum);
                else
                        return usb_rcvbulkpipe(udev, epnum);
        }

        if (usb_endpoint_xfer_int(epd)) {
                if (dir == USBIP_DIR_OUT)
                        return usb_sndintpipe(udev, epnum);
                else
                        return usb_rcvintpipe(udev, epnum);
        }

        if (usb_endpoint_xfer_isoc(epd)) {
                if (dir == USBIP_DIR_OUT)
                        return usb_sndisocpipe(udev, epnum);
                else
                        return usb_rcvisocpipe(udev, epnum);
        }

        /* NOT REACHED */
        dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum);
        return 0;
}

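/*
 * Sanitize the transfer_flags received from the client: only flags that
 * make sense for the endpoint's transfer type and direction are kept,
 * similar to the sanity checks usb_submit_urb() performs on locally
 * submitted URBs.
 */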
static void masking_bogus_flags(struct urb *urb)
{
        int xfertype;
        struct usb_device *dev;
        struct usb_host_endpoint *ep;
        int is_out;
        unsigned int allowed;

        if (!urb || urb->hcpriv || !urb->complete)
                return;
        dev = urb->dev;
        if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
                return;

        ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
                [usb_pipeendpoint(urb->pipe)];
        if (!ep)
                return;

        xfertype = usb_endpoint_type(&ep->desc);
        if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
                struct usb_ctrlrequest *setup =
                        (struct usb_ctrlrequest *) urb->setup_packet;

                if (!setup)
                        return;
                is_out = !(setup->bRequestType & USB_DIR_IN) ||
                        !setup->wLength;
        } else {
                is_out = usb_endpoint_dir_out(&ep->desc);
        }

        /* enforce simple/standard policy */
        allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
                   URB_DIR_MASK | URB_FREE_BUFFER);
        switch (xfertype) {
        case USB_ENDPOINT_XFER_BULK:
                if (is_out)
                        allowed |= URB_ZERO_PACKET;
                /* FALLTHROUGH */
        case USB_ENDPOINT_XFER_CONTROL:
                allowed |= URB_NO_FSBR;	/* only affects UHCI */
                /* FALLTHROUGH */
        default:			/* all non-iso endpoints */
                if (!is_out)
                        allowed |= URB_SHORT_NOT_OK;
                break;
        case USB_ENDPOINT_XFER_ISOC:
                allowed |= URB_ISO_ASAP;
                break;
        }
        urb->transfer_flags &= allowed;
}

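/*
 * Handle a CMD_SUBMIT pdu: allocate a stub_priv and an urb, copy the setup
 * packet and, if present, the transfer buffer and isochronous descriptors
 * from the socket, apply the special-request tweaks and flag masking, and
 * finally submit the urb to the local host controller.  The result is
 * returned to the client later via stub_complete() and stub_tx.
 */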
static void stub_recv_cmd_submit(struct stub_device *sdev,
                                 struct usbip_header *pdu)
{
        int ret;
        struct stub_priv *priv;
        struct usbip_device *ud = &sdev->ud;
        struct usb_device *udev = sdev->udev;
        int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);

        priv = stub_priv_alloc(sdev, pdu);
        if (!priv)
                return;

        /* setup a urb */
        if (usb_pipeisoc(pipe))
                priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
                                          GFP_KERNEL);
        else
                priv->urb = usb_alloc_urb(0, GFP_KERNEL);

        if (!priv->urb) {
                dev_err(&sdev->interface->dev, "malloc urb\n");
                usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
                return;
        }

        /* allocate urb transfer buffer, if needed */
        if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
                priv->urb->transfer_buffer =
                        kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
                                GFP_KERNEL);
                if (!priv->urb->transfer_buffer) {
                        usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
                        return;
                }
        }

        /* copy urb setup packet */
        priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
                                          GFP_KERNEL);
        if (!priv->urb->setup_packet) {
                dev_err(&sdev->interface->dev, "allocate setup_packet\n");
                usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
                return;
        }

        /* set other members from the base header of pdu */
        priv->urb->context = (void *) priv;
        priv->urb->dev = udev;
        priv->urb->pipe = pipe;
        priv->urb->complete = stub_complete;

        usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);


        if (usbip_recv_xbuff(ud, priv->urb) < 0)
                return;

        if (usbip_recv_iso(ud, priv->urb) < 0)
                return;

        /* no need to submit an intercepted request, but harmless? */
        tweak_special_requests(priv->urb);

        masking_bogus_flags(priv->urb);
        /* urb is now ready to submit */
        ret = usb_submit_urb(priv->urb, GFP_KERNEL);

        if (ret == 0)
                usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
                                  pdu->base.seqnum);
        else {
                dev_err(&sdev->interface->dev, "submit_urb error, %d\n", ret);
                usbip_dump_header(pdu);
                usbip_dump_urb(priv->urb);

                /*
                 * Pessimistic.
                 * This connection will be discarded.
                 */
                usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
        }

        usbip_dbg_stub_rx("Leave\n");
}

/* recv a pdu */
static void stub_rx_pdu(struct usbip_device *ud)
{
        int ret;
        struct usbip_header pdu;
        struct stub_device *sdev = container_of(ud, struct stub_device, ud);
        struct device *dev = &sdev->udev->dev;

        usbip_dbg_stub_rx("Enter\n");

        memset(&pdu, 0, sizeof(pdu));

        /* receive a pdu header */
        ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
        if (ret != sizeof(pdu)) {
                dev_err(dev, "recv a header, %d\n", ret);
                usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
                return;
        }

        usbip_header_correct_endian(&pdu, 0);

        if (usbip_dbg_flag_stub_rx)
                usbip_dump_header(&pdu);

        if (!valid_request(sdev, &pdu)) {
                dev_err(dev, "recv invalid request\n");
                usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
                return;
        }

        switch (pdu.base.command) {
        case USBIP_CMD_UNLINK:
                stub_recv_cmd_unlink(sdev, &pdu);
                break;

        case USBIP_CMD_SUBMIT:
                stub_recv_cmd_submit(sdev, &pdu);
                break;

        default:
                /* NOTREACHED */
                dev_err(dev, "unknown pdu\n");
                usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
                break;
        }
}

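/*
 * Receiver kthread: keep reading pdus from the TCP socket until either the
 * thread is asked to stop or a usbip event (error/teardown) has been
 * signalled for this device.
 */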
int stub_rx_loop(void *data)
{
        struct usbip_device *ud = data;

        while (!kthread_should_stop()) {
                if (usbip_event_happened(ud))
                        break;

                stub_rx_pdu(ud);
        }

        return 0;
}

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */

#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "usbip_common.h"
#include "stub.h"

static int is_clear_halt_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;

        req = (struct usb_ctrlrequest *) urb->setup_packet;

        return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
                (req->bRequestType == USB_RECIP_ENDPOINT) &&
                (req->wValue == USB_ENDPOINT_HALT);
}

static int is_set_interface_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;

        req = (struct usb_ctrlrequest *) urb->setup_packet;

        return (req->bRequest == USB_REQ_SET_INTERFACE) &&
                (req->bRequestType == USB_RECIP_INTERFACE);
}

static int is_set_configuration_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;

        req = (struct usb_ctrlrequest *) urb->setup_packet;

        return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
                (req->bRequestType == USB_RECIP_DEVICE);
}

static int is_reset_device_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;
        __u16 value;
        __u16 index;

        req = (struct usb_ctrlrequest *) urb->setup_packet;
        value = le16_to_cpu(req->wValue);
        index = le16_to_cpu(req->wIndex);

        if ((req->bRequest == USB_REQ_SET_FEATURE) &&
            (req->bRequestType == USB_RT_PORT) &&
            (value == USB_PORT_FEAT_RESET)) {
                usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
                return 1;
        } else
                return 0;
}

static int tweak_clear_halt_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;
        int target_endp;
        int target_dir;
        int target_pipe;
        int ret;

        req = (struct usb_ctrlrequest *) urb->setup_packet;

        /*
         * The stalled endpoint is specified in the wIndex value. The endpoint
         * of the urb is the target of this clear_halt request (i.e., control
         * endpoint).
         */
        target_endp = le16_to_cpu(req->wIndex) & 0x000f;

        /* Is the stalled endpoint direction IN or OUT? USB_DIR_IN is 0x80. */
        target_dir = le16_to_cpu(req->wIndex) & 0x0080;

        if (target_dir)
                target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
        else
                target_pipe = usb_sndctrlpipe(urb->dev, target_endp);

        ret = usb_clear_halt(urb->dev, target_pipe);
        if (ret < 0)
                dev_err(&urb->dev->dev,
                        "usb_clear_halt error: devnum %d endp %d ret %d\n",
                        urb->dev->devnum, target_endp, ret);
        else
                dev_info(&urb->dev->dev,
                         "usb_clear_halt done: devnum %d endp %d\n",
                         urb->dev->devnum, target_endp);

        return ret;
}

static int tweak_set_interface_cmd(struct urb *urb)
{
        struct usb_ctrlrequest *req;
        __u16 alternate;
        __u16 interface;
        int ret;

        req = (struct usb_ctrlrequest *) urb->setup_packet;
        alternate = le16_to_cpu(req->wValue);
        interface = le16_to_cpu(req->wIndex);

        usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
                          interface, alternate);

        ret = usb_set_interface(urb->dev, interface, alternate);
        if (ret < 0)
                dev_err(&urb->dev->dev,
                        "usb_set_interface error: inf %u alt %u ret %d\n",
                        interface, alternate, ret);
        else
                dev_info(&urb->dev->dev,
                         "usb_set_interface done: inf %u alt %u\n",
                         interface, alternate);

        return ret;
}

static int tweak_set_configuration_cmd(struct urb *urb)
{
        struct stub_priv *priv = (struct stub_priv *) urb->context;
        struct stub_device *sdev = priv->sdev;
        struct usb_ctrlrequest *req;
        __u16 config;
        int err;

        req = (struct usb_ctrlrequest *) urb->setup_packet;
        config = le16_to_cpu(req->wValue);

        err = usb_set_configuration(sdev->udev, config);
        if (err && err != -ENODEV)
                dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
                        config, err);
        return 0;
}

static int tweak_reset_device_cmd(struct urb *urb)
{
        struct stub_priv *priv = (struct stub_priv *) urb->context;
        struct stub_device *sdev = priv->sdev;

        dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

        if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
                dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
                return 0;
        }
        usb_reset_device(sdev->udev);
        usb_unlock_device(sdev->udev);

        return 0;
}

/*
 * clear_halt, set_interface, and set_configuration require special tricks.
 */
static void tweak_special_requests(struct urb *urb)
{
        if (!urb || !urb->setup_packet)
                return;

        if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
                return;

        if (is_clear_halt_cmd(urb))
                /* tweak clear_halt */
                tweak_clear_halt_cmd(urb);

        else if (is_set_interface_cmd(urb))
                /* tweak set_interface */
                tweak_set_interface_cmd(urb);

        else if (is_set_configuration_cmd(urb))
                /* tweak set_configuration */
                tweak_set_configuration_cmd(urb);

        else if (is_reset_device_cmd(urb))
                tweak_reset_device_cmd(urb);
        else
                usbip_dbg_stub_rx("no need to tweak\n");
}

/*
 * stub_recv_cmd_unlink() unlinks the URB by a call to usb_unlink_urb().
 * By unlinking the urb asynchronously, stub_rx can continuously
 * process incoming urbs. Even if the urb is unlinked, its completion
 * handler will be called and stub_tx will send a return pdu.
 *
 * See also comments about unlinking strategy in vhci_hcd.c.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
                                struct usbip_header *pdu)
{
        int ret;
        unsigned long flags;
        struct stub_priv *priv;

        spin_lock_irqsave(&sdev->priv_lock, flags);

        list_for_each_entry(priv, &sdev->priv_init, list) {
                if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
                        continue;

                /*
                 * This matched urb is not completed yet (i.e., it is still
                 * in flight in the usb hcd hardware/driver). Now we are
                 * cancelling it. The unlinking flag means that we are
                 * now not going to return the normal result pdu of a
                 * submission request, but going to return a result pdu
                 * of the unlink request.
                 */
                priv->unlinking = 1;

                /*
                 * When the unlinking flag is on, priv->seqnum is changed
                 * from the seqnum of the urb being cancelled to the seqnum
                 * of the unlink request. This will be used to make the
                 * result pdu of the unlink request.
                 */
                priv->seqnum = pdu->base.seqnum;

                spin_unlock_irqrestore(&sdev->priv_lock, flags);

                /*
                 * usb_unlink_urb() is called outside the spinlock to avoid
                 * spinlock recursion, since stub_complete() is sometimes
                 * called in this context rather than in interrupt context.
                 * If stub_complete() runs before we call usb_unlink_urb(),
                 * usb_unlink_urb() will return an error value. In this
                 * case, stub_tx will return the result pdu of this unlink
                 * request even though the submission has completed and no
                 * actual unlinking is performed.
                 */
                /*
                 * In that case, urb->status is not -ECONNRESET, so a driver
                 * on the client host can tell that the unlink request
                 * failed.
                 */
                ret = usb_unlink_urb(priv->urb);
                if (ret != -EINPROGRESS)
                        dev_err(&priv->urb->dev->dev,
                                "failed to unlink a urb # %lu, ret %d\n",
                                priv->seqnum, ret);

                return 0;
        }

        usbip_dbg_stub_rx("seqnum %d is not pending\n",
                          pdu->u.cmd_unlink.seqnum);

        /*
         * The urb of the unlink target is not found in the priv_init queue.
         * It was already completed and its result is (or was) going to be
         * sent by a CMD_RET pdu. In this case, usb_unlink_urb() is not
         * needed. We only report the completion of this unlink request to
         * vhci_hcd.
         */
        stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return 0;
}

static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
        struct usbip_device *ud = &sdev->ud;
        int valid = 0;

        if (pdu->base.devid == sdev->devid) {
                spin_lock_irq(&ud->lock);
                if (ud->status == SDEV_ST_USED) {
                        /* A request is valid. */
                        valid = 1;
                }
                spin_unlock_irq(&ud->lock);
        }

        return valid;
}

static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
                                         struct usbip_header *pdu)
{
        struct stub_priv *priv;
        struct usbip_device *ud = &sdev->ud;
        unsigned long flags;

        spin_lock_irqsave(&sdev->priv_lock, flags);

        priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
        if (!priv) {
                dev_err(&sdev->udev->dev, "alloc stub_priv\n");
                spin_unlock_irqrestore(&sdev->priv_lock, flags);
                usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
                return NULL;
        }

        priv->seqnum = pdu->base.seqnum;
        priv->sdev = sdev;

        /*
         * After a stub_priv is linked to a list_head,
         * our error handler can free allocated data.
         */
        list_add_tail(&priv->list, &sdev->priv_init);

        spin_unlock_irqrestore(&sdev->priv_lock, flags);

        return priv;
}

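/*
 * The endpoint number, direction, and isochronous packet count in a
 * CMD_SUBMIT header come from the network and are treated as untrusted
 * input here: an out-of-range or unknown endpoint, or an implausible
 * number_of_packets value, makes get_pipe() return -1 and the caller then
 * drops the request.
 */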
static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
{
        struct usb_device *udev = sdev->udev;
        struct usb_host_endpoint *ep;
        struct usb_endpoint_descriptor *epd = NULL;
        int epnum = pdu->base.ep;
        int dir = pdu->base.direction;

        if (epnum < 0 || epnum > 15)
                goto err_ret;

        if (dir == USBIP_DIR_IN)
                ep = udev->ep_in[epnum & 0x7f];
        else
                ep = udev->ep_out[epnum & 0x7f];
        if (!ep)
                goto err_ret;

        epd = &ep->desc;

        if (usb_endpoint_xfer_control(epd)) {
                if (dir == USBIP_DIR_OUT)
                        return usb_sndctrlpipe(udev, epnum);
                else
                        return usb_rcvctrlpipe(udev, epnum);
        }

        if (usb_endpoint_xfer_bulk(epd)) {
                if (dir == USBIP_DIR_OUT)
                        return usb_sndbulkpipe(udev, epnum);
                else
                        return usb_rcvbulkpipe(udev, epnum);
        }

        if (usb_endpoint_xfer_int(epd)) {
                if (dir == USBIP_DIR_OUT)
                        return usb_sndintpipe(udev, epnum);
                else
                        return usb_rcvintpipe(udev, epnum);
        }

        if (usb_endpoint_xfer_isoc(epd)) {
                /* validate packet size and number of packets */
                unsigned int maxp, packets, bytes;

                maxp = usb_endpoint_maxp(epd);
                maxp *= usb_endpoint_maxp_mult(epd);
                bytes = pdu->u.cmd_submit.transfer_buffer_length;
                packets = DIV_ROUND_UP(bytes, maxp);

                if (pdu->u.cmd_submit.number_of_packets < 0 ||
                    pdu->u.cmd_submit.number_of_packets > packets) {
                        dev_err(&sdev->udev->dev,
                                "CMD_SUBMIT: isoc invalid num packets %d\n",
                                pdu->u.cmd_submit.number_of_packets);
                        return -1;
                }
                if (dir == USBIP_DIR_OUT)
                        return usb_sndisocpipe(udev, epnum);
                else
                        return usb_rcvisocpipe(udev, epnum);
        }

err_ret:
        /* NOT REACHED */
        dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
        return -1;
}

static void masking_bogus_flags(struct urb *urb)
{
        int xfertype;
        struct usb_device *dev;
        struct usb_host_endpoint *ep;
        int is_out;
        unsigned int allowed;

        if (!urb || urb->hcpriv || !urb->complete)
                return;
        dev = urb->dev;
        if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
                return;

        ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
                [usb_pipeendpoint(urb->pipe)];
        if (!ep)
                return;

        xfertype = usb_endpoint_type(&ep->desc);
        if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
                struct usb_ctrlrequest *setup =
                        (struct usb_ctrlrequest *) urb->setup_packet;

                if (!setup)
                        return;
                is_out = !(setup->bRequestType & USB_DIR_IN) ||
                        !setup->wLength;
        } else {
                is_out = usb_endpoint_dir_out(&ep->desc);
        }

        /* enforce simple/standard policy */
        allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
                   URB_DIR_MASK | URB_FREE_BUFFER);
        switch (xfertype) {
        case USB_ENDPOINT_XFER_BULK:
                if (is_out)
                        allowed |= URB_ZERO_PACKET;
                /* FALLTHROUGH */
        default:			/* all non-iso endpoints */
                if (!is_out)
                        allowed |= URB_SHORT_NOT_OK;
                break;
        case USB_ENDPOINT_XFER_ISOC:
                allowed |= URB_ISO_ASAP;
                break;
        }
        urb->transfer_flags &= allowed;
}

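/*
 * Handle a CMD_SUBMIT pdu.  get_pipe() may return -1 for invalid
 * client-supplied endpoint or isochronous parameters, in which case the
 * request is dropped before any allocation takes place.
 */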
static void stub_recv_cmd_submit(struct stub_device *sdev,
                                 struct usbip_header *pdu)
{
        int ret;
        struct stub_priv *priv;
        struct usbip_device *ud = &sdev->ud;
        struct usb_device *udev = sdev->udev;
        int pipe = get_pipe(sdev, pdu);

        if (pipe == -1)
                return;

        priv = stub_priv_alloc(sdev, pdu);
        if (!priv)
                return;

        /* setup a urb */
        if (usb_pipeisoc(pipe))
                priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
                                          GFP_KERNEL);
        else
                priv->urb = usb_alloc_urb(0, GFP_KERNEL);

        if (!priv->urb) {
                usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
                return;
        }

        /* allocate urb transfer buffer, if needed */
        if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
                priv->urb->transfer_buffer =
                        kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
                                GFP_KERNEL);
                if (!priv->urb->transfer_buffer) {
                        usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
                        return;
                }
        }

        /* copy urb setup packet */
        priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
                                          GFP_KERNEL);
        if (!priv->urb->setup_packet) {
                dev_err(&udev->dev, "allocate setup_packet\n");
                usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
                return;
        }

        /* set other members from the base header of pdu */
        priv->urb->context = (void *) priv;
        priv->urb->dev = udev;
        priv->urb->pipe = pipe;
        priv->urb->complete = stub_complete;

        usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);


        if (usbip_recv_xbuff(ud, priv->urb) < 0)
                return;

        if (usbip_recv_iso(ud, priv->urb) < 0)
                return;

        /* no need to submit an intercepted request, but harmless? */
        tweak_special_requests(priv->urb);

        masking_bogus_flags(priv->urb);
        /* urb is now ready to submit */
        ret = usb_submit_urb(priv->urb, GFP_KERNEL);

        if (ret == 0)
                usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
                                  pdu->base.seqnum);
        else {
                dev_err(&udev->dev, "submit_urb error, %d\n", ret);
                usbip_dump_header(pdu);
                usbip_dump_urb(priv->urb);

                /*
                 * Pessimistic.
                 * This connection will be discarded.
                 */
                usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
        }

        usbip_dbg_stub_rx("Leave\n");
}

/* recv a pdu */
static void stub_rx_pdu(struct usbip_device *ud)
{
        int ret;
        struct usbip_header pdu;
        struct stub_device *sdev = container_of(ud, struct stub_device, ud);
        struct device *dev = &sdev->udev->dev;

        usbip_dbg_stub_rx("Enter\n");

        memset(&pdu, 0, sizeof(pdu));

        /* receive a pdu header */
        ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
        if (ret != sizeof(pdu)) {
                dev_err(dev, "recv a header, %d\n", ret);
                usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
                return;
        }

        usbip_header_correct_endian(&pdu, 0);

        if (usbip_dbg_flag_stub_rx)
                usbip_dump_header(&pdu);

        if (!valid_request(sdev, &pdu)) {
                dev_err(dev, "recv invalid request\n");
                usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
                return;
        }

        switch (pdu.base.command) {
        case USBIP_CMD_UNLINK:
                stub_recv_cmd_unlink(sdev, &pdu);
                break;

        case USBIP_CMD_SUBMIT:
                stub_recv_cmd_submit(sdev, &pdu);
                break;

        default:
                /* NOTREACHED */
                dev_err(dev, "unknown pdu\n");
                usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
                break;
        }
}

int stub_rx_loop(void *data)
{
        struct usbip_device *ud = data;

        while (!kthread_should_stop()) {
                if (usbip_event_happened(ud))
                        break;

                stub_rx_pdu(ud);
        }

        return 0;
}