v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * xhci-dbgtty.c - tty glue for xHCI debug capability
  4 *
  5 * Copyright (C) 2017 Intel Corporation
  6 *
  7 * Author: Lu Baolu <baolu.lu@linux.intel.com>
  8 */
  9
 10#include <linux/slab.h>
 11#include <linux/tty.h>
 12#include <linux/tty_flip.h>
 13#include <linux/idr.h>
 14
 15#include "xhci.h"
 16#include "xhci-dbgcap.h"
 17
 18static struct tty_driver *dbc_tty_driver;
 19static struct idr dbc_tty_minors;
 20static DEFINE_MUTEX(dbc_tty_minors_lock);
 21
 22static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
 23{
 24	return dbc->priv;
 25}
 26
 27static unsigned int
 28dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
 29{
 30	unsigned int		len;
 31
 32	len = kfifo_len(&port->write_fifo);
 33	if (len < size)
 34		size = len;
 35	if (size != 0)
 36		size = kfifo_out(&port->write_fifo, packet, size);
 37	return size;
 38}
 39
 40static int dbc_start_tx(struct dbc_port *port)
 41	__releases(&port->port_lock)
 42	__acquires(&port->port_lock)
 43{
 44	int			len;
 45	struct dbc_request	*req;
 46	int			status = 0;
 47	bool			do_tty_wake = false;
 48	struct list_head	*pool = &port->write_pool;
 49
 50	while (!list_empty(pool)) {
 51		req = list_entry(pool->next, struct dbc_request, list_pool);
 52		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
 53		if (len == 0)
 54			break;
 55		do_tty_wake = true;
 56
 57		req->length = len;
 58		list_del(&req->list_pool);
 59
 60		spin_unlock(&port->port_lock);
 61		status = dbc_ep_queue(req);
 62		spin_lock(&port->port_lock);
 63
 64		if (status) {
 65			list_add(&req->list_pool, pool);
 66			break;
 67		}
 68	}
 69
 70	if (do_tty_wake && port->port.tty)
 71		tty_wakeup(port->port.tty);
 72
 73	return status;
 74}
 75
 76static void dbc_start_rx(struct dbc_port *port)
 77	__releases(&port->port_lock)
 78	__acquires(&port->port_lock)
 79{
 80	struct dbc_request	*req;
 81	int			status;
 82	struct list_head	*pool = &port->read_pool;
 83
 84	while (!list_empty(pool)) {
 85		if (!port->port.tty)
 86			break;
 87
 88		req = list_entry(pool->next, struct dbc_request, list_pool);
 89		list_del(&req->list_pool);
 90		req->length = DBC_MAX_PACKET;
 91
 92		spin_unlock(&port->port_lock);
 93		status = dbc_ep_queue(req);
 94		spin_lock(&port->port_lock);
 95
 96		if (status) {
 97			list_add(&req->list_pool, pool);
 98			break;
 99		}
100	}
101}
102
103static void
104dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
105{
106	unsigned long		flags;
107	struct dbc_port		*port = dbc_to_port(dbc);
108
109	spin_lock_irqsave(&port->port_lock, flags);
110	list_add_tail(&req->list_pool, &port->read_queue);
111	tasklet_schedule(&port->push);
112	spin_unlock_irqrestore(&port->port_lock, flags);
113}
114
115static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
116{
117	unsigned long		flags;
118	struct dbc_port		*port = dbc_to_port(dbc);
119
120	spin_lock_irqsave(&port->port_lock, flags);
121	list_add(&req->list_pool, &port->write_pool);
122	switch (req->status) {
123	case 0:
124		dbc_start_tx(port);
125		break;
126	case -ESHUTDOWN:
127		break;
128	default:
129		dev_warn(dbc->dev, "unexpected write complete status %d\n",
130			  req->status);
131		break;
132	}
133	spin_unlock_irqrestore(&port->port_lock, flags);
134}
135
136static void xhci_dbc_free_req(struct dbc_request *req)
137{
138	kfree(req->buf);
139	dbc_free_request(req);
140}
141
142static int
143xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
144			struct list_head *head,
145			void (*fn)(struct xhci_dbc *, struct dbc_request *))
146{
147	int			i;
148	struct dbc_request	*req;
149
150	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
151		req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
152		if (!req)
153			break;
154
155		req->length = DBC_MAX_PACKET;
156		req->buf = kmalloc(req->length, GFP_KERNEL);
157		if (!req->buf) {
158			dbc_free_request(req);
159			break;
160		}
161
162		req->complete = fn;
163		list_add_tail(&req->list_pool, head);
164	}
165
166	return list_empty(head) ? -ENOMEM : 0;
167}
168
169static void
170xhci_dbc_free_requests(struct list_head *head)
171{
172	struct dbc_request	*req;
173
174	while (!list_empty(head)) {
175		req = list_entry(head->next, struct dbc_request, list_pool);
176		list_del(&req->list_pool);
177		xhci_dbc_free_req(req);
178	}
179}
180
181static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
182{
183	struct dbc_port		*port;
184
185	mutex_lock(&dbc_tty_minors_lock);
186	port = idr_find(&dbc_tty_minors, tty->index);
187	mutex_unlock(&dbc_tty_minors_lock);
188
189	if (!port)
190		return -ENXIO;
191
192	tty->driver_data = port;
193
194	return tty_port_install(&port->port, driver, tty);
195}
196
197static int dbc_tty_open(struct tty_struct *tty, struct file *file)
198{
199	struct dbc_port		*port = tty->driver_data;
200
201	return tty_port_open(&port->port, tty, file);
202}
203
204static void dbc_tty_close(struct tty_struct *tty, struct file *file)
205{
206	struct dbc_port		*port = tty->driver_data;
207
208	tty_port_close(&port->port, tty, file);
209}
210
211static int dbc_tty_write(struct tty_struct *tty,
212			 const unsigned char *buf,
213			 int count)
214{
215	struct dbc_port		*port = tty->driver_data;
216	unsigned long		flags;
217
218	spin_lock_irqsave(&port->port_lock, flags);
219	if (count)
220		count = kfifo_in(&port->write_fifo, buf, count);
221	dbc_start_tx(port);
222	spin_unlock_irqrestore(&port->port_lock, flags);
223
224	return count;
225}
226
227static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
228{
229	struct dbc_port		*port = tty->driver_data;
230	unsigned long		flags;
231	int			status;
232
233	spin_lock_irqsave(&port->port_lock, flags);
234	status = kfifo_put(&port->write_fifo, ch);
235	spin_unlock_irqrestore(&port->port_lock, flags);
236
237	return status;
238}
239
240static void dbc_tty_flush_chars(struct tty_struct *tty)
241{
242	struct dbc_port		*port = tty->driver_data;
243	unsigned long		flags;
244
245	spin_lock_irqsave(&port->port_lock, flags);
246	dbc_start_tx(port);
247	spin_unlock_irqrestore(&port->port_lock, flags);
248}
249
250static unsigned int dbc_tty_write_room(struct tty_struct *tty)
251{
252	struct dbc_port		*port = tty->driver_data;
253	unsigned long		flags;
254	unsigned int		room;
255
256	spin_lock_irqsave(&port->port_lock, flags);
257	room = kfifo_avail(&port->write_fifo);
258	spin_unlock_irqrestore(&port->port_lock, flags);
259
260	return room;
261}
262
263static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
264{
265	struct dbc_port		*port = tty->driver_data;
266	unsigned long		flags;
267	unsigned int		chars;
268
269	spin_lock_irqsave(&port->port_lock, flags);
270	chars = kfifo_len(&port->write_fifo);
271	spin_unlock_irqrestore(&port->port_lock, flags);
272
273	return chars;
274}
275
276static void dbc_tty_unthrottle(struct tty_struct *tty)
277{
278	struct dbc_port		*port = tty->driver_data;
279	unsigned long		flags;
280
281	spin_lock_irqsave(&port->port_lock, flags);
282	tasklet_schedule(&port->push);
283	spin_unlock_irqrestore(&port->port_lock, flags);
284}
285
286static const struct tty_operations dbc_tty_ops = {
287	.install		= dbc_tty_install,
288	.open			= dbc_tty_open,
289	.close			= dbc_tty_close,
290	.write			= dbc_tty_write,
291	.put_char		= dbc_tty_put_char,
292	.flush_chars		= dbc_tty_flush_chars,
293	.write_room		= dbc_tty_write_room,
294	.chars_in_buffer	= dbc_tty_chars_in_buffer,
295	.unthrottle		= dbc_tty_unthrottle,
296};
297
298static void dbc_rx_push(struct tasklet_struct *t)
299{
300	struct dbc_request	*req;
301	struct tty_struct	*tty;
302	unsigned long		flags;
303	bool			do_push = false;
304	bool			disconnect = false;
305	struct dbc_port		*port = from_tasklet(port, t, push);
306	struct list_head	*queue = &port->read_queue;
307
308	spin_lock_irqsave(&port->port_lock, flags);
309	tty = port->port.tty;
310	while (!list_empty(queue)) {
311		req = list_first_entry(queue, struct dbc_request, list_pool);
312
313		if (tty && tty_throttled(tty))
314			break;
315
316		switch (req->status) {
317		case 0:
318			break;
319		case -ESHUTDOWN:
320			disconnect = true;
321			break;
322		default:
323			pr_warn("ttyDBC0: unexpected RX status %d\n",
324				req->status);
325			break;
326		}
327
328		if (req->actual) {
329			char		*packet = req->buf;
330			unsigned int	n, size = req->actual;
331			int		count;
332
333			n = port->n_read;
334			if (n) {
335				packet += n;
336				size -= n;
337			}
338
339			count = tty_insert_flip_string(&port->port, packet,
340						       size);
341			if (count)
342				do_push = true;
343			if (count != size) {
344				port->n_read += count;
345				break;
346			}
347			port->n_read = 0;
348		}
349
350		list_move(&req->list_pool, &port->read_pool);
351	}
352
353	if (do_push)
354		tty_flip_buffer_push(&port->port);
355
356	if (!list_empty(queue) && tty) {
357		if (!tty_throttled(tty)) {
358			if (do_push)
359				tasklet_schedule(&port->push);
360			else
361				pr_warn("ttyDBC0: RX not scheduled?\n");
362		}
363	}
364
365	if (!disconnect)
366		dbc_start_rx(port);
367
368	spin_unlock_irqrestore(&port->port_lock, flags);
369}
370
371static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
372{
373	unsigned long	flags;
374	struct dbc_port	*port = container_of(_port, struct dbc_port, port);
375
376	spin_lock_irqsave(&port->port_lock, flags);
377	dbc_start_rx(port);
378	spin_unlock_irqrestore(&port->port_lock, flags);
379
380	return 0;
381}
382
383static const struct tty_port_operations dbc_port_ops = {
384	.activate =	dbc_port_activate,
385};
386
387static void
388xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
389{
390	tty_port_init(&port->port);
391	spin_lock_init(&port->port_lock);
392	tasklet_setup(&port->push, dbc_rx_push);
393	INIT_LIST_HEAD(&port->read_pool);
394	INIT_LIST_HEAD(&port->read_queue);
395	INIT_LIST_HEAD(&port->write_pool);
396
397	port->port.ops =	&dbc_port_ops;
398	port->n_read =		0;
399}
400
401static void
402xhci_dbc_tty_exit_port(struct dbc_port *port)
403{
404	tasklet_kill(&port->push);
405	tty_port_destroy(&port->port);
406}
407
408static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
409{
410	int			ret;
411	struct device		*tty_dev;
412	struct dbc_port		*port = dbc_to_port(dbc);
413
414	if (port->registered)
415		return -EBUSY;
416
417	xhci_dbc_tty_init_port(dbc, port);
418
419	mutex_lock(&dbc_tty_minors_lock);
420	port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
421	mutex_unlock(&dbc_tty_minors_lock);
422
423	if (port->minor < 0) {
424		ret = port->minor;
425		goto err_idr;
426	}
427
428	ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
429	if (ret)
430		goto err_exit_port;
431
432	ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
433				      dbc_read_complete);
434	if (ret)
435		goto err_free_fifo;
436
437	ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
438				      dbc_write_complete);
439	if (ret)
440		goto err_free_requests;
441
442	tty_dev = tty_port_register_device(&port->port,
443					   dbc_tty_driver, port->minor, NULL);
444	if (IS_ERR(tty_dev)) {
445		ret = PTR_ERR(tty_dev);
446		goto err_free_requests;
447	}
448
449	port->registered = true;
450
451	return 0;
452
453err_free_requests:
454	xhci_dbc_free_requests(&port->read_pool);
455	xhci_dbc_free_requests(&port->write_pool);
456err_free_fifo:
457	kfifo_free(&port->write_fifo);
458err_exit_port:
459	idr_remove(&dbc_tty_minors, port->minor);
460err_idr:
461	xhci_dbc_tty_exit_port(port);
462
463	dev_err(dbc->dev, "can't register tty port, err %d\n", ret);
464
465	return ret;
466}
467
468static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
469{
470	struct dbc_port		*port = dbc_to_port(dbc);
471
472	if (!port->registered)
473		return;
474	tty_unregister_device(dbc_tty_driver, port->minor);
475	xhci_dbc_tty_exit_port(port);
476	port->registered = false;
477
478	mutex_lock(&dbc_tty_minors_lock);
479	idr_remove(&dbc_tty_minors, port->minor);
480	mutex_unlock(&dbc_tty_minors_lock);
481
482	kfifo_free(&port->write_fifo);
483	xhci_dbc_free_requests(&port->read_pool);
484	xhci_dbc_free_requests(&port->read_queue);
485	xhci_dbc_free_requests(&port->write_pool);
486}
487
488static const struct dbc_driver dbc_driver = {
489	.configure		= xhci_dbc_tty_register_device,
490	.disconnect		= xhci_dbc_tty_unregister_device,
491};
492
493int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
494{
495	struct xhci_dbc		*dbc;
496	struct dbc_port		*port;
497	int			status;
498
499	if (!dbc_tty_driver)
500		return -ENODEV;
501
502	port = kzalloc(sizeof(*port), GFP_KERNEL);
503	if (!port)
504		return -ENOMEM;
505
506	dbc = xhci_alloc_dbc(dev, base, &dbc_driver);
507
508	if (!dbc) {
509		status = -ENOMEM;
510		goto out2;
511	}
512
513	dbc->priv = port;
514
515	/* get rid of xhci once this is a real driver binding to a device */
516	xhci->dbc = dbc;
517
518	return 0;
519out2:
520	kfree(port);
521
522	return status;
523}
524
525/*
526 * undo what probe did, assume dbc is stopped already.
527 * we also assume tty_unregister_device() is called before this
528 */
529void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
530{
531	struct dbc_port         *port = dbc_to_port(dbc);
532
533	xhci_dbc_remove(dbc);
534	kfree(port);
535}
536
537int dbc_tty_init(void)
538{
539	int		ret;
540
541	idr_init(&dbc_tty_minors);
542
543	dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
544					  TTY_DRIVER_DYNAMIC_DEV);
545	if (IS_ERR(dbc_tty_driver)) {
546		idr_destroy(&dbc_tty_minors);
547		return PTR_ERR(dbc_tty_driver);
548	}
549
550	dbc_tty_driver->driver_name = "dbc_serial";
551	dbc_tty_driver->name = "ttyDBC";
552
553	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
554	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
555	dbc_tty_driver->init_termios = tty_std_termios;
556	dbc_tty_driver->init_termios.c_cflag =
557			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
558	dbc_tty_driver->init_termios.c_ispeed = 9600;
559	dbc_tty_driver->init_termios.c_ospeed = 9600;
560
561	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);
562
563	ret = tty_register_driver(dbc_tty_driver);
564	if (ret) {
565		pr_err("Can't register dbc tty driver\n");
566		tty_driver_kref_put(dbc_tty_driver);
567		idr_destroy(&dbc_tty_minors);
568	}
569
570	return ret;
571}
572
573void dbc_tty_exit(void)
574{
575	if (dbc_tty_driver) {
576		tty_unregister_driver(dbc_tty_driver);
577		tty_driver_kref_put(dbc_tty_driver);
578		dbc_tty_driver = NULL;
579	}
580
581	idr_destroy(&dbc_tty_minors);
582}
v4.17
  1/**
  2 * xhci-dbgtty.c - tty glue for xHCI debug capability
  3 *
  4 * Copyright (C) 2017 Intel Corporation
  5 *
  6 * Author: Lu Baolu <baolu.lu@linux.intel.com>
  7 */
  8
  9#include <linux/slab.h>
 10#include <linux/tty.h>
 11#include <linux/tty_flip.h>
 12
 13#include "xhci.h"
 14#include "xhci-dbgcap.h"
 15
 16static unsigned int
 17dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
 18{
 19	unsigned int		len;
 20
 21	len = kfifo_len(&port->write_fifo);
 22	if (len < size)
 23		size = len;
 24	if (size != 0)
 25		size = kfifo_out(&port->write_fifo, packet, size);
 26	return size;
 27}
 28
 29static int dbc_start_tx(struct dbc_port *port)
 30	__releases(&port->port_lock)
 31	__acquires(&port->port_lock)
 32{
 33	int			len;
 34	struct dbc_request	*req;
 35	int			status = 0;
 36	bool			do_tty_wake = false;
 37	struct list_head	*pool = &port->write_pool;
 38
 39	while (!list_empty(pool)) {
 40		req = list_entry(pool->next, struct dbc_request, list_pool);
 41		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
 42		if (len == 0)
 43			break;
 44		do_tty_wake = true;
 45
 46		req->length = len;
 47		list_del(&req->list_pool);
 48
 49		spin_unlock(&port->port_lock);
 50		status = dbc_ep_queue(port->out, req, GFP_ATOMIC);
 51		spin_lock(&port->port_lock);
 52
 53		if (status) {
 54			list_add(&req->list_pool, pool);
 55			break;
 56		}
 57	}
 58
 59	if (do_tty_wake && port->port.tty)
 60		tty_wakeup(port->port.tty);
 61
 62	return status;
 63}
 64
 65static void dbc_start_rx(struct dbc_port *port)
 66	__releases(&port->port_lock)
 67	__acquires(&port->port_lock)
 68{
 69	struct dbc_request	*req;
 70	int			status;
 71	struct list_head	*pool = &port->read_pool;
 72
 73	while (!list_empty(pool)) {
 74		if (!port->port.tty)
 75			break;
 76
 77		req = list_entry(pool->next, struct dbc_request, list_pool);
 78		list_del(&req->list_pool);
 79		req->length = DBC_MAX_PACKET;
 80
 81		spin_unlock(&port->port_lock);
 82		status = dbc_ep_queue(port->in, req, GFP_ATOMIC);
 83		spin_lock(&port->port_lock);
 84
 85		if (status) {
 86			list_add(&req->list_pool, pool);
 87			break;
 88		}
 89	}
 90}
 91
 92static void
 93dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
 94{
 95	unsigned long		flags;
 96	struct xhci_dbc		*dbc = xhci->dbc;
 97	struct dbc_port		*port = &dbc->port;
 98
 99	spin_lock_irqsave(&port->port_lock, flags);
100	list_add_tail(&req->list_pool, &port->read_queue);
101	tasklet_schedule(&port->push);
102	spin_unlock_irqrestore(&port->port_lock, flags);
103}
104
105static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
106{
107	unsigned long		flags;
108	struct xhci_dbc		*dbc = xhci->dbc;
109	struct dbc_port		*port = &dbc->port;
110
111	spin_lock_irqsave(&port->port_lock, flags);
112	list_add(&req->list_pool, &port->write_pool);
113	switch (req->status) {
114	case 0:
115		dbc_start_tx(port);
116		break;
117	case -ESHUTDOWN:
118		break;
119	default:
120		xhci_warn(xhci, "unexpected write complete status %d\n",
121			  req->status);
122		break;
123	}
124	spin_unlock_irqrestore(&port->port_lock, flags);
125}
126
127static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
128{
129	kfree(req->buf);
130	dbc_free_request(dep, req);
131}
132
133static int
134xhci_dbc_alloc_requests(struct dbc_ep *dep, struct list_head *head,
135			void (*fn)(struct xhci_hcd *, struct dbc_request *))
136{
137	int			i;
138	struct dbc_request	*req;
139
140	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
141		req = dbc_alloc_request(dep, GFP_ATOMIC);
142		if (!req)
143			break;
144
145		req->length = DBC_MAX_PACKET;
146		req->buf = kmalloc(req->length, GFP_KERNEL);
147		if (!req->buf) {
148			xhci_dbc_free_req(dep, req);
149			break;
150		}
151
152		req->complete = fn;
153		list_add_tail(&req->list_pool, head);
154	}
155
156	return list_empty(head) ? -ENOMEM : 0;
157}
158
159static void
160xhci_dbc_free_requests(struct dbc_ep *dep, struct list_head *head)
161{
162	struct dbc_request	*req;
163
164	while (!list_empty(head)) {
165		req = list_entry(head->next, struct dbc_request, list_pool);
166		list_del(&req->list_pool);
167		xhci_dbc_free_req(dep, req);
168	}
169}
170
171static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
172{
173	struct dbc_port		*port = driver->driver_state;
174
175	tty->driver_data = port;
176
177	return tty_port_install(&port->port, driver, tty);
178}
179
180static int dbc_tty_open(struct tty_struct *tty, struct file *file)
181{
182	struct dbc_port		*port = tty->driver_data;
183
184	return tty_port_open(&port->port, tty, file);
185}
186
187static void dbc_tty_close(struct tty_struct *tty, struct file *file)
188{
189	struct dbc_port		*port = tty->driver_data;
190
191	tty_port_close(&port->port, tty, file);
192}
193
194static int dbc_tty_write(struct tty_struct *tty,
195			 const unsigned char *buf,
196			 int count)
197{
198	struct dbc_port		*port = tty->driver_data;
199	unsigned long		flags;
200
201	spin_lock_irqsave(&port->port_lock, flags);
202	if (count)
203		count = kfifo_in(&port->write_fifo, buf, count);
204	dbc_start_tx(port);
205	spin_unlock_irqrestore(&port->port_lock, flags);
206
207	return count;
208}
209
210static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
211{
212	struct dbc_port		*port = tty->driver_data;
213	unsigned long		flags;
214	int			status;
215
216	spin_lock_irqsave(&port->port_lock, flags);
217	status = kfifo_put(&port->write_fifo, ch);
218	spin_unlock_irqrestore(&port->port_lock, flags);
219
220	return status;
221}
222
223static void dbc_tty_flush_chars(struct tty_struct *tty)
224{
225	struct dbc_port		*port = tty->driver_data;
226	unsigned long		flags;
227
228	spin_lock_irqsave(&port->port_lock, flags);
229	dbc_start_tx(port);
230	spin_unlock_irqrestore(&port->port_lock, flags);
231}
232
233static int dbc_tty_write_room(struct tty_struct *tty)
234{
235	struct dbc_port		*port = tty->driver_data;
236	unsigned long		flags;
237	int			room = 0;
238
239	spin_lock_irqsave(&port->port_lock, flags);
240	room = kfifo_avail(&port->write_fifo);
241	spin_unlock_irqrestore(&port->port_lock, flags);
242
243	return room;
244}
245
246static int dbc_tty_chars_in_buffer(struct tty_struct *tty)
247{
248	struct dbc_port		*port = tty->driver_data;
249	unsigned long		flags;
250	int			chars = 0;
251
252	spin_lock_irqsave(&port->port_lock, flags);
253	chars = kfifo_len(&port->write_fifo);
254	spin_unlock_irqrestore(&port->port_lock, flags);
255
256	return chars;
257}
258
259static void dbc_tty_unthrottle(struct tty_struct *tty)
260{
261	struct dbc_port		*port = tty->driver_data;
262	unsigned long		flags;
263
264	spin_lock_irqsave(&port->port_lock, flags);
265	tasklet_schedule(&port->push);
266	spin_unlock_irqrestore(&port->port_lock, flags);
267}
268
269static const struct tty_operations dbc_tty_ops = {
270	.install		= dbc_tty_install,
271	.open			= dbc_tty_open,
272	.close			= dbc_tty_close,
273	.write			= dbc_tty_write,
274	.put_char		= dbc_tty_put_char,
275	.flush_chars		= dbc_tty_flush_chars,
276	.write_room		= dbc_tty_write_room,
277	.chars_in_buffer	= dbc_tty_chars_in_buffer,
278	.unthrottle		= dbc_tty_unthrottle,
279};
280
281static struct tty_driver *dbc_tty_driver;
282
283int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
284{
285	int			status;
286	struct xhci_dbc		*dbc = xhci->dbc;
287
288	dbc_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW |
289					  TTY_DRIVER_DYNAMIC_DEV);
290	if (IS_ERR(dbc_tty_driver)) {
291		status = PTR_ERR(dbc_tty_driver);
292		dbc_tty_driver = NULL;
293		return status;
294	}
295
296	dbc_tty_driver->driver_name = "dbc_serial";
297	dbc_tty_driver->name = "ttyDBC";
298
299	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
300	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
301	dbc_tty_driver->init_termios = tty_std_termios;
302	dbc_tty_driver->init_termios.c_cflag =
303			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
304	dbc_tty_driver->init_termios.c_ispeed = 9600;
305	dbc_tty_driver->init_termios.c_ospeed = 9600;
306	dbc_tty_driver->driver_state = &dbc->port;
307
308	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);
309
310	status = tty_register_driver(dbc_tty_driver);
311	if (status) {
312		xhci_err(xhci,
313			 "can't register dbc tty driver, err %d\n", status);
314		put_tty_driver(dbc_tty_driver);
315		dbc_tty_driver = NULL;
316	}
317
318	return status;
319}
320
321void xhci_dbc_tty_unregister_driver(void)
322{
323	if (dbc_tty_driver) {
324		tty_unregister_driver(dbc_tty_driver);
325		put_tty_driver(dbc_tty_driver);
326		dbc_tty_driver = NULL;
327	}
328}
329
330static void dbc_rx_push(unsigned long _port)
331{
332	struct dbc_request	*req;
333	struct tty_struct	*tty;
334	unsigned long		flags;
335	bool			do_push = false;
336	bool			disconnect = false;
337	struct dbc_port		*port = (void *)_port;
338	struct list_head	*queue = &port->read_queue;
339
340	spin_lock_irqsave(&port->port_lock, flags);
341	tty = port->port.tty;
342	while (!list_empty(queue)) {
343		req = list_first_entry(queue, struct dbc_request, list_pool);
344
345		if (tty && tty_throttled(tty))
346			break;
347
348		switch (req->status) {
349		case 0:
350			break;
351		case -ESHUTDOWN:
352			disconnect = true;
353			break;
354		default:
355			pr_warn("ttyDBC0: unexpected RX status %d\n",
356				req->status);
357			break;
358		}
359
360		if (req->actual) {
361			char		*packet = req->buf;
362			unsigned int	n, size = req->actual;
363			int		count;
364
365			n = port->n_read;
366			if (n) {
367				packet += n;
368				size -= n;
369			}
370
371			count = tty_insert_flip_string(&port->port, packet,
372						       size);
373			if (count)
374				do_push = true;
375			if (count != size) {
376				port->n_read += count;
377				break;
378			}
379			port->n_read = 0;
380		}
381
382		list_move(&req->list_pool, &port->read_pool);
383	}
384
385	if (do_push)
386		tty_flip_buffer_push(&port->port);
387
388	if (!list_empty(queue) && tty) {
389		if (!tty_throttled(tty)) {
390			if (do_push)
391				tasklet_schedule(&port->push);
392			else
393				pr_warn("ttyDBC0: RX not scheduled?\n");
394		}
395	}
396
397	if (!disconnect)
398		dbc_start_rx(port);
399
400	spin_unlock_irqrestore(&port->port_lock, flags);
401}
402
403static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
404{
405	unsigned long	flags;
406	struct dbc_port	*port = container_of(_port, struct dbc_port, port);
407
408	spin_lock_irqsave(&port->port_lock, flags);
409	dbc_start_rx(port);
410	spin_unlock_irqrestore(&port->port_lock, flags);
411
412	return 0;
413}
414
415static const struct tty_port_operations dbc_port_ops = {
416	.activate =	dbc_port_activate,
417};
418
419static void
420xhci_dbc_tty_init_port(struct xhci_hcd *xhci, struct dbc_port *port)
421{
422	tty_port_init(&port->port);
423	spin_lock_init(&port->port_lock);
424	tasklet_init(&port->push, dbc_rx_push, (unsigned long)port);
425	INIT_LIST_HEAD(&port->read_pool);
426	INIT_LIST_HEAD(&port->read_queue);
427	INIT_LIST_HEAD(&port->write_pool);
428
429	port->in =		get_in_ep(xhci);
430	port->out =		get_out_ep(xhci);
431	port->port.ops =	&dbc_port_ops;
432	port->n_read =		0;
433}
434
435static void
436xhci_dbc_tty_exit_port(struct dbc_port *port)
437{
438	tasklet_kill(&port->push);
439	tty_port_destroy(&port->port);
440}
441
442int xhci_dbc_tty_register_device(struct xhci_hcd *xhci)
443{
444	int			ret;
445	struct device		*tty_dev;
446	struct xhci_dbc		*dbc = xhci->dbc;
447	struct dbc_port		*port = &dbc->port;
448
449	xhci_dbc_tty_init_port(xhci, port);
450	tty_dev = tty_port_register_device(&port->port,
451					   dbc_tty_driver, 0, NULL);
452	if (IS_ERR(tty_dev)) {
453		ret = PTR_ERR(tty_dev);
454		goto register_fail;
455	}
456
457	ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
458	if (ret)
459		goto buf_alloc_fail;
460
461	ret = xhci_dbc_alloc_requests(port->in, &port->read_pool,
462				      dbc_read_complete);
463	if (ret)
464		goto request_fail;
465
466	ret = xhci_dbc_alloc_requests(port->out, &port->write_pool,
467				      dbc_write_complete);
468	if (ret)
469		goto request_fail;
470
471	port->registered = true;
472
473	return 0;
474
475request_fail:
476	xhci_dbc_free_requests(port->in, &port->read_pool);
477	xhci_dbc_free_requests(port->out, &port->write_pool);
478	kfifo_free(&port->write_fifo);
479
480buf_alloc_fail:
481	tty_unregister_device(dbc_tty_driver, 0);
482
483register_fail:
484	xhci_dbc_tty_exit_port(port);
485
486	xhci_err(xhci, "can't register tty port, err %d\n", ret);
487
488	return ret;
489}
490
491void xhci_dbc_tty_unregister_device(struct xhci_hcd *xhci)
492{
493	struct xhci_dbc		*dbc = xhci->dbc;
494	struct dbc_port		*port = &dbc->port;
495
496	tty_unregister_device(dbc_tty_driver, 0);
497	xhci_dbc_tty_exit_port(port);
498	port->registered = false;
499
500	kfifo_free(&port->write_fifo);
501	xhci_dbc_free_requests(get_out_ep(xhci), &port->read_pool);
502	xhci_dbc_free_requests(get_out_ep(xhci), &port->read_queue);
503	xhci_dbc_free_requests(get_in_ep(xhci), &port->write_pool);
504}
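
Usage note: both versions register the port under the "ttyDBC" name (dbc_tty_driver->name above), so on the DbC target system the device appears as /dev/ttyDBC0 (or ttyDBC<minor> once the v6.2 idr allocation hands out higher minors). Below is a minimal, hypothetical userspace sketch for exercising that node; the device path and the explicit 9600 8N1 raw setup are assumptions for illustration, not part of the driver.

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios tio;
	char buf[64];
	ssize_t n;
	int fd;

	/* Assumed device node created by this driver (minor 0). */
	fd = open("/dev/ttyDBC0", O_RDWR | O_NOCTTY);
	if (fd < 0) {
		perror("open /dev/ttyDBC0");
		return 1;
	}

	/* Raw 8N1 at 9600; matches the driver's init_termios defaults. */
	if (tcgetattr(fd, &tio) == 0) {
		cfmakeraw(&tio);
		cfsetispeed(&tio, B9600);
		cfsetospeed(&tio, B9600);
		tcsetattr(fd, TCSANOW, &tio);
	}

	/* Writes land in write_fifo and are drained by dbc_start_tx(). */
	if (write(fd, "hello from DbC target\n", 22) < 0)
		perror("write");

	/* Reads return data queued by dbc_rx_push() into the tty buffer. */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("got: %s", buf);
	}

	close(fd);
	return 0;
}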