// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * ep0.c - Endpoint 0 handling
 *
 * Copyright 2017 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
{
	struct usb_request *req = &ep->ep0.req.req;
	int rc;

	if (WARN_ON(ep->d_idx != 0))
		return std_req_stall;
	if (WARN_ON(!ep->ep0.dir_in))
		return std_req_stall;
	if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
		return std_req_stall;
	if (WARN_ON(req->status == -EINPROGRESS))
		return std_req_stall;

	req->buf = ptr;
	req->length = len;
	req->complete = NULL;
	req->zero = true;

	/*
	 * Call internal queue directly after dropping the lock. This is
	 * safe to do as the reply is always the last thing done when
	 * processing a SETUP packet, usually as a tail call
	 */
	spin_unlock(&ep->vhub->lock);
	if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
		rc = std_req_stall;
	else
		rc = std_req_data;
	spin_lock(&ep->vhub->lock);
	return rc;
}

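/*
 * Illustrative only (a sketch, not code from this file): a request
 * handler replying with a small amount of data would typically end
 * with something like
 *
 *	return ast_vhub_reply(ep, (char *)&data, sizeof(data));
 *
 * where "data" stands for whatever the handler assembled; the actual
 * copy into the EP buffer then happens in ast_vhub_ep0_do_send()
 * below.
 */
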
int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
	u8 *buffer = ep->buf;
	unsigned int i;
	va_list args;

	va_start(args, len);

	/* Copy data directly into EP buffer */
	for (i = 0; i < len; i++)
		buffer[i] = va_arg(args, int);
	va_end(args);

	/* req->buf NULL means data is already there */
	return ast_vhub_reply(ep, NULL, len);
}

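/*
 * Callers normally reach this through a variadic convenience macro
 * (ast_vhub_simple_reply() in vhub.h, assuming the usual layout of
 * this driver) that derives "len" from the number of byte arguments,
 * so a handler can simply write, for instance:
 *
 *	return ast_vhub_simple_reply(ep, 0x00, 0x00);
 *
 * to send a two byte, all-zero reply.
 */
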
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
	struct usb_ctrlrequest crq;
	enum std_req_rc std_req_rc;
	int rc = -ENODEV;

	if (WARN_ON(ep->d_idx != 0))
		return;

	/*
	 * Grab the setup packet from the chip and byteswap
	 * interesting fields
	 */
	memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));

	EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
	      crq.bRequestType, crq.bRequest,
	      le16_to_cpu(crq.wValue),
	      le16_to_cpu(crq.wIndex),
	      le16_to_cpu(crq.wLength),
	      (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
	      ep->ep0.state);

	/*
	 * Check our state, cancel pending requests if needed
	 *
	 * Note: Under some circumstances, we can get a new setup
	 * packet while waiting for the stall ack, just accept it.
	 *
	 * In any case, a SETUP packet in the wrong state should have
	 * reset the HW state machine, so let's just log, nuke
	 * requests, move on.
	 */
	if (ep->ep0.state != ep0_state_token &&
	    ep->ep0.state != ep0_state_stall) {
		EPDBG(ep, "wrong state\n");
		ast_vhub_nuke(ep, -EIO);
	}

	/* Calculate next state for EP0 */
	ep->ep0.state = ep0_state_data;
	ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);

	/* If this is the vHub, we handle requests differently */
	std_req_rc = std_req_driver;
	if (ep->dev == NULL) {
		if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
			std_req_rc = ast_vhub_std_hub_request(ep, &crq);
		else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
			std_req_rc = ast_vhub_class_hub_request(ep, &crq);
		else
			std_req_rc = std_req_stall;
	} else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		std_req_rc = ast_vhub_std_dev_request(ep, &crq);

	/* Act upon result */
	switch(std_req_rc) {
	case std_req_complete:
		goto complete;
	case std_req_stall:
		goto stall;
	case std_req_driver:
		break;
	case std_req_data:
		return;
	}

	/* Pass request up to the gadget driver */
	if (WARN_ON(!ep->dev))
		goto stall;
	if (ep->dev->driver) {
		EPDBG(ep, "forwarding to gadget...\n");
		spin_unlock(&ep->vhub->lock);
		rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
		spin_lock(&ep->vhub->lock);
		EPDBG(ep, "driver returned %d\n", rc);
	} else {
		EPDBG(ep, "no gadget for request !\n");
	}
	if (rc >= 0)
		return;

 stall:
	EPDBG(ep, "stalling\n");
	writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_stall;
	ep->ep0.dir_in = false;
	return;

 complete:
	EPVDBG(ep, "sending [in] status with no data\n");
	writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
}

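/*
 * Summary of the EP0 state machine implemented above and below:
 *
 *	token  -- SETUP ----> data    (ast_vhub_ep0_handle_setup, unless
 *				       the request completes or stalls
 *				       right away)
 *	data   -- last ack -> status  (do_send / do_receive)
 *	status -- ack ------> token   (ast_vhub_ep0_handle_ack)
 *	any    -- error ----> stall   (cleared by the next SETUP)
 */
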
static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
				 struct ast_vhub_req *req)
{
	unsigned int chunk;
	u32 reg;

	/* If this is a 0-length request, it's the gadget trying to
	 * send a status on our behalf. We take it from here.
	 */
	if (req->req.length == 0)
		req->last_desc = 1;

	/* Are we done ? Complete request, otherwise wait for next interrupt */
	if (req->last_desc >= 0) {
		EPVDBG(ep, "complete send %d/%d\n",
		       req->req.actual, req->req.length);
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
		return;
	}

	/*
	 * Next chunk cropped to max packet size. Also check if this
	 * is the last packet
	 */
	chunk = req->req.length - req->req.actual;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
	       chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);

	/*
	 * Copy data if any (internal requests already have data
	 * in the EP buffer)
	 */
	if (chunk && req->req.buf)
		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);

	vhub_dma_workaround(ep->buf);

	/* Remember chunk size and trigger send */
	reg = VHUB_EP0_SET_TX_LEN(chunk);
	writel(reg, ep->ep0.ctlstat);
	writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	req->req.actual += chunk;
}

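/*
 * Worked example: a 100-byte IN reply with a 64-byte maxpacket goes
 * out as a 64-byte chunk followed by a 36-byte one; the short second
 * chunk sets last_desc, so the ack after it moves EP0 to the status
 * phase. With req->req.zero set and a length that is an exact
 * multiple of maxpacket, an extra zero-length packet is sent before
 * the request completes.
 */
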
static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
	EPVDBG(ep, "rx prime\n");

	/* Prime endpoint for receiving data */
	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}

static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
				    unsigned int len)
{
	unsigned int remain;
	int rc = 0;

	/* We are receiving... grab request */
	remain = req->req.length - req->req.actual;

	EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);

	/* Are we getting more than asked ? */
	if (len > remain) {
		EPDBG(ep, "receiving too much (ovf: %d) !\n",
		      len - remain);
		len = remain;
		rc = -EOVERFLOW;
	}

	/* The hardware can return a wrong data length */
	if (len < ep->ep.maxpacket && len != remain) {
		EPDBG(ep, "using expected data len instead\n");
		len = remain;
	}

	if (len && req->req.buf)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);
	req->req.actual += len;

	/* Done ? */
	if (len < ep->ep.maxpacket || len == remain) {
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, rc);
	} else
		ast_vhub_ep0_rx_prime(ep);
}

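/*
 * Mirror of the send side: for a 100-byte OUT transfer with a 64-byte
 * maxpacket, successive acks deliver 64 then 36 bytes; a short packet
 * (len < maxpacket) or hitting the expected total (len == remain)
 * ends the data phase and arms the IN status stage.
 */
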
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
	struct ast_vhub_req *req;
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	bool stall = false;
	u32 stat;

	/* Read EP0 status */
	stat = readl(ep->ep0.ctlstat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
	       stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);

	switch(ep->ep0.state) {
	case ep0_state_token:
		/* There should be no request queued in that state... */
		if (req) {
			dev_warn(dev, "request present while in TOKEN state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}
		dev_warn(dev, "ack while in TOKEN state\n");
		stall = true;
		break;
	case ep0_state_data:
		/* Check the state bits corresponding to our direction */
		if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
		    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
		    (ep->ep0.dir_in != in_ack)) {
			/* In that case, ignore interrupt */
			dev_warn(dev, "irq state mismatch\n");
			break;
		}
		/*
		 * We are in data phase and there's no request, something is
		 * wrong, stall
		 */
		if (!req) {
			dev_warn(dev, "data phase, no request\n");
			stall = true;
			break;
		}

		/* We have a request, handle data transfers */
		if (ep->ep0.dir_in)
			ast_vhub_ep0_do_send(ep, req);
		else
			ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
		return;
	case ep0_state_status:
		/* Nuke stale requests */
		if (req) {
			dev_warn(dev, "request present while in STATUS state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}

		/*
		 * If the status phase completes with the wrong ack, stall
		 * the endpoint just in case, to abort whatever the host
		 * was doing.
		 */
		if (ep->ep0.dir_in == in_ack) {
			dev_warn(dev, "status direction mismatch\n");
			stall = true;
		}
		break;
	case ep0_state_stall:
		/*
		 * There shouldn't be any request left, but nuke just in case
		 * otherwise the stale request will block subsequent ones
		 */
		ast_vhub_nuke(ep, -EIO);
		break;
	}

	/* Reset to token state or stall */
	if (stall) {
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_stall;
	} else
		ep->ep0.state = ep0_state_token;
}

static int ast_vhub_ep0_queue(struct usb_ep *u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	unsigned long flags;

	/* Paranoid checks */
	if (!u_req || (!u_req->complete && !req->internal)) {
		dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Not endpoint 0 ? */
	if (WARN_ON(ep->d_idx != 0))
		return -EINVAL;

	/* Disabled device */
	if (ep->dev && !ep->dev->enabled)
		return -ESHUTDOWN;

	/* Data, no buffer and not internal ? */
	if (u_req->length && !u_req->buf && !req->internal) {
		dev_warn(dev, "Request with no buffer !\n");
		return -EINVAL;
	}

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d zero=%d noshort=%d is_in=%d\n",
	       u_req->length, u_req->zero,
	       u_req->short_not_ok, ep->ep0.dir_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->last_desc = -1;
	req->active = false;

	spin_lock_irqsave(&vhub->lock, flags);

	/* EP0 can only support a single request at a time */
	if (!list_empty(&ep->queue) ||
	    ep->ep0.state == ep0_state_token ||
	    ep->ep0.state == ep0_state_stall) {
		dev_warn(dev, "EP0: Request in wrong state\n");
		EPVDBG(ep, "EP0: list_empty=%d state=%d\n",
		       list_empty(&ep->queue), ep->ep0.state);
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EBUSY;
	}

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);

	if (ep->ep0.dir_in) {
		/* IN request, send data */
		ast_vhub_ep0_do_send(ep, req);
	} else if (u_req->length == 0) {
		/* 0-len request, send completion as rx */
		EPVDBG(ep, "0-length rx completion\n");
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
	} else {
		/* OUT request, start receiver */
		ast_vhub_ep0_rx_prime(ep);
	}

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

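/*
 * Design note: each EP0 has a single dedicated packet buffer (set up
 * in ast_vhub_init_ep0() below), so there is nothing to gain from
 * queueing more than one request at a time; the -EBUSY above enforces
 * that.
 */
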
static int ast_vhub_ep0_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Only one request can be in the queue */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	/* Is it ours ? */
	if (req && u_req == &req->req) {
		EPVDBG(ep, "dequeue req @%p\n", req);

		/*
		 * We don't have to deal with "active" as all
		 * DMAs go to the EP buffers, not the request.
		 */
		ast_vhub_done(ep, req, -ECONNRESET);

		/* We do stall the EP to clean things up in HW */
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_status;
		ep->ep0.dir_in = false;
		rc = 0;
	}
	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}


static const struct usb_ep_ops ast_vhub_ep0_ops = {
	.queue = ast_vhub_ep0_queue,
	.dequeue = ast_vhub_ep0_dequeue,
	.alloc_request = ast_vhub_alloc_request,
	.free_request = ast_vhub_free_request,
};

void ast_vhub_reset_ep0(struct ast_vhub_dev *dev)
{
	struct ast_vhub_ep *ep = &dev->ep0;

	ast_vhub_nuke(ep, -EIO);
	ep->ep0.state = ep0_state_token;
}


void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
		       struct ast_vhub_dev *dev)
{
	memset(ep, 0, sizeof(*ep));

	INIT_LIST_HEAD(&ep->ep.ep_list);
	INIT_LIST_HEAD(&ep->queue);
	ep->ep.ops = &ast_vhub_ep0_ops;
	ep->ep.name = "ep0";
	ep->ep.caps.type_control = true;
	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
	ep->d_idx = 0;
	ep->dev = dev;
	ep->vhub = vhub;
	ep->ep0.state = ep0_state_token;
	INIT_LIST_HEAD(&ep->ep0.req.queue);
	ep->ep0.req.internal = true;

	/* Small difference between vHub and devices */
	if (dev) {
		ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
		ep->ep0.setup = vhub->regs +
			AST_VHUB_SETUP0 + 8 * (dev->index + 1);
		ep->buf = vhub->ep0_bufs +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
		ep->buf_dma = vhub->ep0_bufs_dma +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
	} else {
		ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
		ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
		ep->buf = vhub->ep0_bufs;
		ep->buf_dma = vhub->ep0_bufs_dma;
	}
}
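
/*
 * Resulting layout of the shared ep0_bufs area, one
 * AST_VHUB_EP0_MAX_PACKET slot per EP0: slot 0 belongs to the vHub
 * itself, slot (index + 1) to downstream device "index". The
 * per-device SETUP packet registers follow the same pattern, 8 bytes
 * apart starting at AST_VHUB_SETUP0.
 */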