v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * xhci-dbgtty.c - tty glue for xHCI debug capability
  4 *
  5 * Copyright (C) 2017 Intel Corporation
  6 *
  7 * Author: Lu Baolu <baolu.lu@linux.intel.com>
  8 */
  9
 10#include <linux/slab.h>
 11#include <linux/tty.h>
 12#include <linux/tty_flip.h>
 13#include <linux/idr.h>
 14
 15#include "xhci.h"
 16#include "xhci-dbgcap.h"
 17
 18static struct tty_driver *dbc_tty_driver;
 19static struct idr dbc_tty_minors;
 20static DEFINE_MUTEX(dbc_tty_minors_lock);
 21
 22static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
 23{
 24	return dbc->priv;
 25}
 26
 27static unsigned int
 28dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
 29{
 30	unsigned int		len;
 31
 32	len = kfifo_len(&port->write_fifo);
 33	if (len < size)
 34		size = len;
 35	if (size != 0)
 36		size = kfifo_out(&port->write_fifo, packet, size);
 37	return size;
 38}
 39
 40static int dbc_start_tx(struct dbc_port *port)
 41	__releases(&port->port_lock)
 42	__acquires(&port->port_lock)
 43{
 44	int			len;
 45	struct dbc_request	*req;
 46	int			status = 0;
 47	bool			do_tty_wake = false;
 48	struct list_head	*pool = &port->write_pool;
 49
 50	while (!list_empty(pool)) {
 51		req = list_entry(pool->next, struct dbc_request, list_pool);
 52		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
 53		if (len == 0)
 54			break;
 55		do_tty_wake = true;
 56
 57		req->length = len;
 58		list_del(&req->list_pool);
 59
 60		spin_unlock(&port->port_lock);
 61		status = dbc_ep_queue(req);
 62		spin_lock(&port->port_lock);
 63
 64		if (status) {
 65			list_add(&req->list_pool, pool);
 66			break;
 67		}
 68	}
 69
 70	if (do_tty_wake && port->port.tty)
 71		tty_wakeup(port->port.tty);
 72
 73	return status;
 74}
 75
 76static void dbc_start_rx(struct dbc_port *port)
 77	__releases(&port->port_lock)
 78	__acquires(&port->port_lock)
 79{
 80	struct dbc_request	*req;
 81	int			status;
 82	struct list_head	*pool = &port->read_pool;
 83
 84	while (!list_empty(pool)) {
 85		if (!port->port.tty)
 86			break;
 87
 88		req = list_entry(pool->next, struct dbc_request, list_pool);
 89		list_del(&req->list_pool);
 90		req->length = DBC_MAX_PACKET;
 91
 92		spin_unlock(&port->port_lock);
 93		status = dbc_ep_queue(req);
 94		spin_lock(&port->port_lock);
 95
 96		if (status) {
 97			list_add(&req->list_pool, pool);
 98			break;
 99		}
100	}
101}
102
103static void
104dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
105{
106	unsigned long		flags;
107	struct dbc_port		*port = dbc_to_port(dbc);
108
109	spin_lock_irqsave(&port->port_lock, flags);
110	list_add_tail(&req->list_pool, &port->read_queue);
111	tasklet_schedule(&port->push);
112	spin_unlock_irqrestore(&port->port_lock, flags);
113}
114
115static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
116{
117	unsigned long		flags;
118	struct dbc_port		*port = dbc_to_port(dbc);
119
120	spin_lock_irqsave(&port->port_lock, flags);
121	list_add(&req->list_pool, &port->write_pool);
122	switch (req->status) {
123	case 0:
124		dbc_start_tx(port);
125		break;
126	case -ESHUTDOWN:
127		break;
128	default:
129		dev_warn(dbc->dev, "unexpected write complete status %d\n",
130			  req->status);
131		break;
132	}
133	spin_unlock_irqrestore(&port->port_lock, flags);
134}
135
136static void xhci_dbc_free_req(struct dbc_request *req)
137{
138	kfree(req->buf);
139	dbc_free_request(req);
140}
141
142static int
143xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
144			struct list_head *head,
145			void (*fn)(struct xhci_dbc *, struct dbc_request *))
146{
147	int			i;
148	struct dbc_request	*req;
149
150	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
151		req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
152		if (!req)
153			break;
154
155		req->length = DBC_MAX_PACKET;
156		req->buf = kmalloc(req->length, GFP_KERNEL);
157		if (!req->buf) {
158			dbc_free_request(req);
159			break;
160		}
161
162		req->complete = fn;
163		list_add_tail(&req->list_pool, head);
164	}
165
166	return list_empty(head) ? -ENOMEM : 0;
167}
168
169static void
170xhci_dbc_free_requests(struct list_head *head)
171{
172	struct dbc_request	*req;
173
174	while (!list_empty(head)) {
175		req = list_entry(head->next, struct dbc_request, list_pool);
176		list_del(&req->list_pool);
177		xhci_dbc_free_req(req);
178	}
179}
180
181static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
182{
183	struct dbc_port		*port;
184
185	mutex_lock(&dbc_tty_minors_lock);
186	port = idr_find(&dbc_tty_minors, tty->index);
187	mutex_unlock(&dbc_tty_minors_lock);
188
189	if (!port)
190		return -ENXIO;
191
192	tty->driver_data = port;
193
194	return tty_port_install(&port->port, driver, tty);
195}
196
197static int dbc_tty_open(struct tty_struct *tty, struct file *file)
198{
199	struct dbc_port		*port = tty->driver_data;
200
201	return tty_port_open(&port->port, tty, file);
202}
203
204static void dbc_tty_close(struct tty_struct *tty, struct file *file)
205{
206	struct dbc_port		*port = tty->driver_data;
207
208	tty_port_close(&port->port, tty, file);
209}
210
211static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
212			     size_t count)
213{
214	struct dbc_port		*port = tty->driver_data;
215	unsigned long		flags;
216
217	spin_lock_irqsave(&port->port_lock, flags);
218	if (count)
219		count = kfifo_in(&port->write_fifo, buf, count);
220	dbc_start_tx(port);
221	spin_unlock_irqrestore(&port->port_lock, flags);
222
223	return count;
224}
225
226static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
227{
228	struct dbc_port		*port = tty->driver_data;
229	unsigned long		flags;
230	int			status;
231
232	spin_lock_irqsave(&port->port_lock, flags);
233	status = kfifo_put(&port->write_fifo, ch);
234	spin_unlock_irqrestore(&port->port_lock, flags);
235
236	return status;
237}
238
239static void dbc_tty_flush_chars(struct tty_struct *tty)
240{
241	struct dbc_port		*port = tty->driver_data;
242	unsigned long		flags;
243
244	spin_lock_irqsave(&port->port_lock, flags);
245	dbc_start_tx(port);
246	spin_unlock_irqrestore(&port->port_lock, flags);
247}
248
249static unsigned int dbc_tty_write_room(struct tty_struct *tty)
250{
251	struct dbc_port		*port = tty->driver_data;
252	unsigned long		flags;
253	unsigned int		room;
254
255	spin_lock_irqsave(&port->port_lock, flags);
256	room = kfifo_avail(&port->write_fifo);
257	spin_unlock_irqrestore(&port->port_lock, flags);
258
259	return room;
260}
261
262static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
263{
264	struct dbc_port		*port = tty->driver_data;
265	unsigned long		flags;
266	unsigned int		chars;
267
268	spin_lock_irqsave(&port->port_lock, flags);
269	chars = kfifo_len(&port->write_fifo);
270	spin_unlock_irqrestore(&port->port_lock, flags);
271
272	return chars;
273}
274
275static void dbc_tty_unthrottle(struct tty_struct *tty)
276{
277	struct dbc_port		*port = tty->driver_data;
278	unsigned long		flags;
279
280	spin_lock_irqsave(&port->port_lock, flags);
281	tasklet_schedule(&port->push);
282	spin_unlock_irqrestore(&port->port_lock, flags);
283}
284
285static const struct tty_operations dbc_tty_ops = {
286	.install		= dbc_tty_install,
287	.open			= dbc_tty_open,
288	.close			= dbc_tty_close,
289	.write			= dbc_tty_write,
290	.put_char		= dbc_tty_put_char,
291	.flush_chars		= dbc_tty_flush_chars,
292	.write_room		= dbc_tty_write_room,
293	.chars_in_buffer	= dbc_tty_chars_in_buffer,
294	.unthrottle		= dbc_tty_unthrottle,
295};
296
297static void dbc_rx_push(struct tasklet_struct *t)
298{
299	struct dbc_request	*req;
300	struct tty_struct	*tty;
301	unsigned long		flags;
302	bool			do_push = false;
303	bool			disconnect = false;
304	struct dbc_port		*port = from_tasklet(port, t, push);
305	struct list_head	*queue = &port->read_queue;
306
307	spin_lock_irqsave(&port->port_lock, flags);
308	tty = port->port.tty;
309	while (!list_empty(queue)) {
310		req = list_first_entry(queue, struct dbc_request, list_pool);
311
312		if (tty && tty_throttled(tty))
313			break;
314
315		switch (req->status) {
316		case 0:
317			break;
318		case -ESHUTDOWN:
319			disconnect = true;
320			break;
321		default:
322			pr_warn("ttyDBC0: unexpected RX status %d\n",
323				req->status);
324			break;
325		}
326
327		if (req->actual) {
328			char		*packet = req->buf;
329			unsigned int	n, size = req->actual;
330			int		count;
331
332			n = port->n_read;
333			if (n) {
334				packet += n;
335				size -= n;
336			}
337
338			count = tty_insert_flip_string(&port->port, packet,
339						       size);
340			if (count)
341				do_push = true;
342			if (count != size) {
343				port->n_read += count;
344				break;
345			}
346			port->n_read = 0;
347		}
348
349		list_move(&req->list_pool, &port->read_pool);
350	}
351
352	if (do_push)
353		tty_flip_buffer_push(&port->port);
354
355	if (!list_empty(queue) && tty) {
356		if (!tty_throttled(tty)) {
357			if (do_push)
358				tasklet_schedule(&port->push);
359			else
360				pr_warn("ttyDBC0: RX not scheduled?\n");
361		}
362	}
363
364	if (!disconnect)
365		dbc_start_rx(port);
366
367	spin_unlock_irqrestore(&port->port_lock, flags);
368}
369
370static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
371{
372	unsigned long	flags;
373	struct dbc_port	*port = container_of(_port, struct dbc_port, port);
374
375	spin_lock_irqsave(&port->port_lock, flags);
376	dbc_start_rx(port);
377	spin_unlock_irqrestore(&port->port_lock, flags);
378
379	return 0;
380}
381
382static const struct tty_port_operations dbc_port_ops = {
383	.activate =	dbc_port_activate,
384};
385
386static void
387xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
388{
389	tty_port_init(&port->port);
390	spin_lock_init(&port->port_lock);
391	tasklet_setup(&port->push, dbc_rx_push);
392	INIT_LIST_HEAD(&port->read_pool);
393	INIT_LIST_HEAD(&port->read_queue);
394	INIT_LIST_HEAD(&port->write_pool);
395
396	port->port.ops =	&dbc_port_ops;
397	port->n_read =		0;
398}
399
400static void
401xhci_dbc_tty_exit_port(struct dbc_port *port)
402{
403	tasklet_kill(&port->push);
404	tty_port_destroy(&port->port);
405}
406
407static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
408{
409	int			ret;
410	struct device		*tty_dev;
411	struct dbc_port		*port = dbc_to_port(dbc);
412
413	if (port->registered)
414		return -EBUSY;
415
416	xhci_dbc_tty_init_port(dbc, port);
417
418	mutex_lock(&dbc_tty_minors_lock);
419	port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
420	mutex_unlock(&dbc_tty_minors_lock);
421
422	if (port->minor < 0) {
423		ret = port->minor;
424		goto err_idr;
425	}
426
427	ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
428	if (ret)
429		goto err_exit_port;
430
431	ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
432				      dbc_read_complete);
433	if (ret)
434		goto err_free_fifo;
435
436	ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
437				      dbc_write_complete);
438	if (ret)
439		goto err_free_requests;
440
441	tty_dev = tty_port_register_device(&port->port,
442					   dbc_tty_driver, port->minor, NULL);
443	if (IS_ERR(tty_dev)) {
444		ret = PTR_ERR(tty_dev);
445		goto err_free_requests;
446	}
447
448	port->registered = true;
449
450	return 0;
451
452err_free_requests:
453	xhci_dbc_free_requests(&port->read_pool);
454	xhci_dbc_free_requests(&port->write_pool);
455err_free_fifo:
456	kfifo_free(&port->write_fifo);
457err_exit_port:
458	idr_remove(&dbc_tty_minors, port->minor);
459err_idr:
460	xhci_dbc_tty_exit_port(port);
461
462	dev_err(dbc->dev, "can't register tty port, err %d\n", ret);
463
464	return ret;
465}
466
467static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
468{
469	struct dbc_port		*port = dbc_to_port(dbc);
470
471	if (!port->registered)
472		return;
473	tty_unregister_device(dbc_tty_driver, port->minor);
474	xhci_dbc_tty_exit_port(port);
475	port->registered = false;
476
477	mutex_lock(&dbc_tty_minors_lock);
478	idr_remove(&dbc_tty_minors, port->minor);
479	mutex_unlock(&dbc_tty_minors_lock);
480
481	kfifo_free(&port->write_fifo);
482	xhci_dbc_free_requests(&port->read_pool);
483	xhci_dbc_free_requests(&port->read_queue);
484	xhci_dbc_free_requests(&port->write_pool);
485}
486
487static const struct dbc_driver dbc_driver = {
488	.configure		= xhci_dbc_tty_register_device,
489	.disconnect		= xhci_dbc_tty_unregister_device,
490};
491
492int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
493{
494	struct xhci_dbc		*dbc;
495	struct dbc_port		*port;
496	int			status;
497
498	if (!dbc_tty_driver)
499		return -ENODEV;
500
501	port = kzalloc(sizeof(*port), GFP_KERNEL);
502	if (!port)
503		return -ENOMEM;
504
505	dbc = xhci_alloc_dbc(dev, base, &dbc_driver);
506
507	if (!dbc) {
508		status = -ENOMEM;
509		goto out2;
510	}
511
512	dbc->priv = port;
513
514	/* get rid of xhci once this is a real driver binding to a device */
515	xhci->dbc = dbc;
516
517	return 0;
518out2:
519	kfree(port);
520
521	return status;
522}
523
524/*
525 * undo what probe did, assume dbc is stopped already.
526 * we also assume tty_unregister_device() is called before this
527 */
528void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
529{
530	struct dbc_port         *port = dbc_to_port(dbc);
531
532	xhci_dbc_remove(dbc);
533	kfree(port);
534}
535
536int dbc_tty_init(void)
537{
538	int		ret;
539
540	idr_init(&dbc_tty_minors);
541
542	dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
543					  TTY_DRIVER_DYNAMIC_DEV);
544	if (IS_ERR(dbc_tty_driver)) {
545		idr_destroy(&dbc_tty_minors);
546		return PTR_ERR(dbc_tty_driver);
547	}
548
549	dbc_tty_driver->driver_name = "dbc_serial";
550	dbc_tty_driver->name = "ttyDBC";
551
552	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
553	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
554	dbc_tty_driver->init_termios = tty_std_termios;
555	dbc_tty_driver->init_termios.c_cflag =
556			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
557	dbc_tty_driver->init_termios.c_ispeed = 9600;
558	dbc_tty_driver->init_termios.c_ospeed = 9600;
559
560	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);
561
562	ret = tty_register_driver(dbc_tty_driver);
563	if (ret) {
564		pr_err("Can't register dbc tty driver\n");
565		tty_driver_kref_put(dbc_tty_driver);
566		idr_destroy(&dbc_tty_minors);
567	}
568
569	return ret;
570}
571
572void dbc_tty_exit(void)
573{
574	if (dbc_tty_driver) {
575		tty_unregister_driver(dbc_tty_driver);
576		tty_driver_kref_put(dbc_tty_driver);
577		dbc_tty_driver = NULL;
578	}
579
580	idr_destroy(&dbc_tty_minors);
581}
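
Both versions register the debug capability as a ttyDBC serial node (ttyDBC0 for the first port) on the machine whose xHCI controller provides the DbC, with a 9600-baud default line setting in init_termios. Purely as an illustration, and assuming the node shows up as /dev/ttyDBC0, a small local test program along these lines could switch it to raw mode and relay incoming data to stdout; none of this code is part of the driver above.

/*
 * Illustrative sketch only: read from the first DbC tty port and copy
 * the data to stdout.  The /dev/ttyDBC0 path is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios tio;
	char buf[256];
	ssize_t n;
	int fd;

	fd = open("/dev/ttyDBC0", O_RDWR | O_NOCTTY);
	if (fd < 0) {
		perror("open /dev/ttyDBC0");
		return 1;
	}

	/* Raw mode, so the byte stream passes through unmodified. */
	tcgetattr(fd, &tio);
	cfmakeraw(&tio);
	tcsetattr(fd, TCSANOW, &tio);

	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		if (write(STDOUT_FILENO, buf, n) < 0)
			break;
	}

	close(fd);
	return 0;
}

Writes to the node land in the driver's write_fifo and are drained by dbc_start_tx(); received data is handed to the tty layer in dbc_rx_push().
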
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/**
  3 * xhci-dbgtty.c - tty glue for xHCI debug capability
  4 *
  5 * Copyright (C) 2017 Intel Corporation
  6 *
  7 * Author: Lu Baolu <baolu.lu@linux.intel.com>
  8 */
  9
 10#include <linux/slab.h>
 11#include <linux/tty.h>
 12#include <linux/tty_flip.h>
 13
 14#include "xhci.h"
 15#include "xhci-dbgcap.h"
 16
 17static unsigned int
 18dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
 19{
 20	unsigned int		len;
 21
 22	len = kfifo_len(&port->write_fifo);
 23	if (len < size)
 24		size = len;
 25	if (size != 0)
 26		size = kfifo_out(&port->write_fifo, packet, size);
 27	return size;
 28}
 29
 30static int dbc_start_tx(struct dbc_port *port)
 31	__releases(&port->port_lock)
 32	__acquires(&port->port_lock)
 33{
 34	int			len;
 35	struct dbc_request	*req;
 36	int			status = 0;
 37	bool			do_tty_wake = false;
 38	struct list_head	*pool = &port->write_pool;
 39
 40	while (!list_empty(pool)) {
 41		req = list_entry(pool->next, struct dbc_request, list_pool);
 42		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
 43		if (len == 0)
 44			break;
 45		do_tty_wake = true;
 46
 47		req->length = len;
 48		list_del(&req->list_pool);
 49
 50		spin_unlock(&port->port_lock);
 51		status = dbc_ep_queue(port->out, req, GFP_ATOMIC);
 52		spin_lock(&port->port_lock);
 53
 54		if (status) {
 55			list_add(&req->list_pool, pool);
 56			break;
 57		}
 58	}
 59
 60	if (do_tty_wake && port->port.tty)
 61		tty_wakeup(port->port.tty);
 62
 63	return status;
 64}
 65
 66static void dbc_start_rx(struct dbc_port *port)
 67	__releases(&port->port_lock)
 68	__acquires(&port->port_lock)
 69{
 70	struct dbc_request	*req;
 71	int			status;
 72	struct list_head	*pool = &port->read_pool;
 73
 74	while (!list_empty(pool)) {
 75		if (!port->port.tty)
 76			break;
 77
 78		req = list_entry(pool->next, struct dbc_request, list_pool);
 79		list_del(&req->list_pool);
 80		req->length = DBC_MAX_PACKET;
 81
 82		spin_unlock(&port->port_lock);
 83		status = dbc_ep_queue(port->in, req, GFP_ATOMIC);
 84		spin_lock(&port->port_lock);
 85
 86		if (status) {
 87			list_add(&req->list_pool, pool);
 88			break;
 89		}
 90	}
 91}
 92
 93static void
 94dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
 95{
 96	unsigned long		flags;
 97	struct xhci_dbc		*dbc = xhci->dbc;
 98	struct dbc_port		*port = &dbc->port;
 99
100	spin_lock_irqsave(&port->port_lock, flags);
101	list_add_tail(&req->list_pool, &port->read_queue);
102	tasklet_schedule(&port->push);
103	spin_unlock_irqrestore(&port->port_lock, flags);
104}
105
106static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
107{
108	unsigned long		flags;
109	struct xhci_dbc		*dbc = xhci->dbc;
110	struct dbc_port		*port = &dbc->port;
111
112	spin_lock_irqsave(&port->port_lock, flags);
113	list_add(&req->list_pool, &port->write_pool);
114	switch (req->status) {
115	case 0:
116		dbc_start_tx(port);
117		break;
118	case -ESHUTDOWN:
119		break;
120	default:
121		xhci_warn(xhci, "unexpected write complete status %d\n",
122			  req->status);
123		break;
124	}
125	spin_unlock_irqrestore(&port->port_lock, flags);
126}
127
128static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
129{
130	kfree(req->buf);
131	dbc_free_request(dep, req);
132}
133
134static int
135xhci_dbc_alloc_requests(struct dbc_ep *dep, struct list_head *head,
136			void (*fn)(struct xhci_hcd *, struct dbc_request *))
137{
138	int			i;
139	struct dbc_request	*req;
140
141	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
142		req = dbc_alloc_request(dep, GFP_KERNEL);
143		if (!req)
144			break;
145
146		req->length = DBC_MAX_PACKET;
147		req->buf = kmalloc(req->length, GFP_KERNEL);
148		if (!req->buf) {
149			dbc_free_request(dep, req);
150			break;
151		}
152
153		req->complete = fn;
154		list_add_tail(&req->list_pool, head);
155	}
156
157	return list_empty(head) ? -ENOMEM : 0;
158}
159
160static void
161xhci_dbc_free_requests(struct dbc_ep *dep, struct list_head *head)
162{
163	struct dbc_request	*req;
164
165	while (!list_empty(head)) {
166		req = list_entry(head->next, struct dbc_request, list_pool);
167		list_del(&req->list_pool);
168		xhci_dbc_free_req(dep, req);
169	}
170}
171
172static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
173{
174	struct dbc_port		*port = driver->driver_state;
175
176	tty->driver_data = port;
177
178	return tty_port_install(&port->port, driver, tty);
179}
180
181static int dbc_tty_open(struct tty_struct *tty, struct file *file)
182{
183	struct dbc_port		*port = tty->driver_data;
184
185	return tty_port_open(&port->port, tty, file);
186}
187
188static void dbc_tty_close(struct tty_struct *tty, struct file *file)
189{
190	struct dbc_port		*port = tty->driver_data;
191
192	tty_port_close(&port->port, tty, file);
193}
194
195static int dbc_tty_write(struct tty_struct *tty,
196			 const unsigned char *buf,
197			 int count)
198{
199	struct dbc_port		*port = tty->driver_data;
200	unsigned long		flags;
201
202	spin_lock_irqsave(&port->port_lock, flags);
203	if (count)
204		count = kfifo_in(&port->write_fifo, buf, count);
205	dbc_start_tx(port);
206	spin_unlock_irqrestore(&port->port_lock, flags);
207
208	return count;
209}
210
211static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
212{
213	struct dbc_port		*port = tty->driver_data;
214	unsigned long		flags;
215	int			status;
216
217	spin_lock_irqsave(&port->port_lock, flags);
218	status = kfifo_put(&port->write_fifo, ch);
219	spin_unlock_irqrestore(&port->port_lock, flags);
220
221	return status;
222}
223
224static void dbc_tty_flush_chars(struct tty_struct *tty)
225{
226	struct dbc_port		*port = tty->driver_data;
227	unsigned long		flags;
228
229	spin_lock_irqsave(&port->port_lock, flags);
230	dbc_start_tx(port);
231	spin_unlock_irqrestore(&port->port_lock, flags);
232}
233
234static int dbc_tty_write_room(struct tty_struct *tty)
235{
236	struct dbc_port		*port = tty->driver_data;
237	unsigned long		flags;
238	int			room = 0;
239
240	spin_lock_irqsave(&port->port_lock, flags);
241	room = kfifo_avail(&port->write_fifo);
242	spin_unlock_irqrestore(&port->port_lock, flags);
243
244	return room;
245}
246
247static int dbc_tty_chars_in_buffer(struct tty_struct *tty)
248{
249	struct dbc_port		*port = tty->driver_data;
250	unsigned long		flags;
251	int			chars = 0;
252
253	spin_lock_irqsave(&port->port_lock, flags);
254	chars = kfifo_len(&port->write_fifo);
255	spin_unlock_irqrestore(&port->port_lock, flags);
256
257	return chars;
258}
259
260static void dbc_tty_unthrottle(struct tty_struct *tty)
261{
262	struct dbc_port		*port = tty->driver_data;
263	unsigned long		flags;
264
265	spin_lock_irqsave(&port->port_lock, flags);
266	tasklet_schedule(&port->push);
267	spin_unlock_irqrestore(&port->port_lock, flags);
268}
269
270static const struct tty_operations dbc_tty_ops = {
271	.install		= dbc_tty_install,
272	.open			= dbc_tty_open,
273	.close			= dbc_tty_close,
274	.write			= dbc_tty_write,
275	.put_char		= dbc_tty_put_char,
276	.flush_chars		= dbc_tty_flush_chars,
277	.write_room		= dbc_tty_write_room,
278	.chars_in_buffer	= dbc_tty_chars_in_buffer,
279	.unthrottle		= dbc_tty_unthrottle,
280};
281
282static struct tty_driver *dbc_tty_driver;
283
284int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
285{
286	int			status;
287	struct xhci_dbc		*dbc = xhci->dbc;
288
289	dbc_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW |
290					  TTY_DRIVER_DYNAMIC_DEV);
291	if (IS_ERR(dbc_tty_driver)) {
292		status = PTR_ERR(dbc_tty_driver);
293		dbc_tty_driver = NULL;
294		return status;
295	}
296
297	dbc_tty_driver->driver_name = "dbc_serial";
298	dbc_tty_driver->name = "ttyDBC";
299
300	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
301	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
302	dbc_tty_driver->init_termios = tty_std_termios;
303	dbc_tty_driver->init_termios.c_cflag =
304			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
305	dbc_tty_driver->init_termios.c_ispeed = 9600;
306	dbc_tty_driver->init_termios.c_ospeed = 9600;
307	dbc_tty_driver->driver_state = &dbc->port;
308
309	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);
310
311	status = tty_register_driver(dbc_tty_driver);
312	if (status) {
313		xhci_err(xhci,
314			 "can't register dbc tty driver, err %d\n", status);
315		put_tty_driver(dbc_tty_driver);
316		dbc_tty_driver = NULL;
317	}
318
319	return status;
320}
321
322void xhci_dbc_tty_unregister_driver(void)
323{
324	if (dbc_tty_driver) {
325		tty_unregister_driver(dbc_tty_driver);
326		put_tty_driver(dbc_tty_driver);
327		dbc_tty_driver = NULL;
328	}
329}
330
331static void dbc_rx_push(unsigned long _port)
332{
333	struct dbc_request	*req;
334	struct tty_struct	*tty;
335	unsigned long		flags;
336	bool			do_push = false;
337	bool			disconnect = false;
338	struct dbc_port		*port = (void *)_port;
339	struct list_head	*queue = &port->read_queue;
340
341	spin_lock_irqsave(&port->port_lock, flags);
342	tty = port->port.tty;
343	while (!list_empty(queue)) {
344		req = list_first_entry(queue, struct dbc_request, list_pool);
345
346		if (tty && tty_throttled(tty))
347			break;
348
349		switch (req->status) {
350		case 0:
351			break;
352		case -ESHUTDOWN:
353			disconnect = true;
354			break;
355		default:
356			pr_warn("ttyDBC0: unexpected RX status %d\n",
357				req->status);
358			break;
359		}
360
361		if (req->actual) {
362			char		*packet = req->buf;
363			unsigned int	n, size = req->actual;
364			int		count;
365
366			n = port->n_read;
367			if (n) {
368				packet += n;
369				size -= n;
370			}
371
372			count = tty_insert_flip_string(&port->port, packet,
373						       size);
374			if (count)
375				do_push = true;
376			if (count != size) {
377				port->n_read += count;
378				break;
379			}
380			port->n_read = 0;
381		}
382
383		list_move(&req->list_pool, &port->read_pool);
384	}
385
386	if (do_push)
387		tty_flip_buffer_push(&port->port);
388
389	if (!list_empty(queue) && tty) {
390		if (!tty_throttled(tty)) {
391			if (do_push)
392				tasklet_schedule(&port->push);
393			else
394				pr_warn("ttyDBC0: RX not scheduled?\n");
395		}
396	}
397
398	if (!disconnect)
399		dbc_start_rx(port);
400
401	spin_unlock_irqrestore(&port->port_lock, flags);
402}
403
404static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
405{
406	unsigned long	flags;
407	struct dbc_port	*port = container_of(_port, struct dbc_port, port);
408
409	spin_lock_irqsave(&port->port_lock, flags);
410	dbc_start_rx(port);
411	spin_unlock_irqrestore(&port->port_lock, flags);
412
413	return 0;
414}
415
416static const struct tty_port_operations dbc_port_ops = {
417	.activate =	dbc_port_activate,
418};
419
420static void
421xhci_dbc_tty_init_port(struct xhci_hcd *xhci, struct dbc_port *port)
422{
423	tty_port_init(&port->port);
424	spin_lock_init(&port->port_lock);
425	tasklet_init(&port->push, dbc_rx_push, (unsigned long)port);
426	INIT_LIST_HEAD(&port->read_pool);
427	INIT_LIST_HEAD(&port->read_queue);
428	INIT_LIST_HEAD(&port->write_pool);
429
430	port->in =		get_in_ep(xhci);
431	port->out =		get_out_ep(xhci);
432	port->port.ops =	&dbc_port_ops;
433	port->n_read =		0;
434}
435
436static void
437xhci_dbc_tty_exit_port(struct dbc_port *port)
438{
439	tasklet_kill(&port->push);
440	tty_port_destroy(&port->port);
441}
442
443int xhci_dbc_tty_register_device(struct xhci_hcd *xhci)
444{
445	int			ret;
446	struct device		*tty_dev;
447	struct xhci_dbc		*dbc = xhci->dbc;
448	struct dbc_port		*port = &dbc->port;
449
450	xhci_dbc_tty_init_port(xhci, port);
451	tty_dev = tty_port_register_device(&port->port,
452					   dbc_tty_driver, 0, NULL);
453	if (IS_ERR(tty_dev)) {
454		ret = PTR_ERR(tty_dev);
455		goto register_fail;
456	}
457
458	ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
459	if (ret)
460		goto buf_alloc_fail;
461
462	ret = xhci_dbc_alloc_requests(port->in, &port->read_pool,
463				      dbc_read_complete);
464	if (ret)
465		goto request_fail;
466
467	ret = xhci_dbc_alloc_requests(port->out, &port->write_pool,
468				      dbc_write_complete);
469	if (ret)
470		goto request_fail;
471
472	port->registered = true;
473
474	return 0;
475
476request_fail:
477	xhci_dbc_free_requests(port->in, &port->read_pool);
478	xhci_dbc_free_requests(port->out, &port->write_pool);
479	kfifo_free(&port->write_fifo);
480
481buf_alloc_fail:
482	tty_unregister_device(dbc_tty_driver, 0);
483
484register_fail:
485	xhci_dbc_tty_exit_port(port);
486
487	xhci_err(xhci, "can't register tty port, err %d\n", ret);
488
489	return ret;
490}
491
492void xhci_dbc_tty_unregister_device(struct xhci_hcd *xhci)
493{
494	struct xhci_dbc		*dbc = xhci->dbc;
495	struct dbc_port		*port = &dbc->port;
496
497	tty_unregister_device(dbc_tty_driver, 0);
498	xhci_dbc_tty_exit_port(port);
499	port->registered = false;
500
501	kfifo_free(&port->write_fifo);
502	xhci_dbc_free_requests(get_out_ep(xhci), &port->read_pool);
503	xhci_dbc_free_requests(get_out_ep(xhci), &port->read_queue);
504	xhci_dbc_free_requests(get_in_ep(xhci), &port->write_pool);
505}