xhci-dbgtty.c as shipped in kernel v6.2:
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * xhci-dbgtty.c - tty glue for xHCI debug capability
  4 *
  5 * Copyright (C) 2017 Intel Corporation
  6 *
  7 * Author: Lu Baolu <baolu.lu@linux.intel.com>
  8 */
  9
 10#include <linux/slab.h>
 11#include <linux/tty.h>
 12#include <linux/tty_flip.h>
 13#include <linux/idr.h>
 14
 15#include "xhci.h"
 16#include "xhci-dbgcap.h"
 17
 18static struct tty_driver *dbc_tty_driver;
 19static struct idr dbc_tty_minors;
 20static DEFINE_MUTEX(dbc_tty_minors_lock);
 21
 22static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
 23{
 24	return dbc->priv;
 25}
 26
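/*
 * Copy up to @size bytes from the port's write fifo into @packet and
 * return the number of bytes actually copied (0 if the fifo is empty).
 */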
 27static unsigned int
 28dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
 29{
 30	unsigned int		len;
 31
 32	len = kfifo_len(&port->write_fifo);
 33	if (len < size)
 34		size = len;
 35	if (size != 0)
 36		size = kfifo_out(&port->write_fifo, packet, size);
 37	return size;
 38}
 39
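/*
 * Drain the write fifo into idle requests from the write pool and hand
 * them to the DbC hardware. Called with port_lock held; the lock is
 * dropped around dbc_ep_queue().
 */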
 40static int dbc_start_tx(struct dbc_port *port)
 41	__releases(&port->port_lock)
 42	__acquires(&port->port_lock)
 43{
 44	int			len;
 45	struct dbc_request	*req;
 46	int			status = 0;
 47	bool			do_tty_wake = false;
 48	struct list_head	*pool = &port->write_pool;
 49
 50	while (!list_empty(pool)) {
 51		req = list_entry(pool->next, struct dbc_request, list_pool);
 52		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
 53		if (len == 0)
 54			break;
 55		do_tty_wake = true;
 56
 57		req->length = len;
 58		list_del(&req->list_pool);
 59
 60		spin_unlock(&port->port_lock);
 61		status = dbc_ep_queue(req);
 62		spin_lock(&port->port_lock);
 63
 64		if (status) {
 65			list_add(&req->list_pool, pool);
 66			break;
 67		}
 68	}
 69
 70	if (do_tty_wake && port->port.tty)
 71		tty_wakeup(port->port.tty);
 72
 73	return status;
 74}
 75
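/*
 * Requeue idle requests from the read pool to the DbC hardware so that
 * more data can be received. Called with port_lock held.
 */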
 76static void dbc_start_rx(struct dbc_port *port)
 77	__releases(&port->port_lock)
 78	__acquires(&port->port_lock)
 79{
 80	struct dbc_request	*req;
 81	int			status;
 82	struct list_head	*pool = &port->read_pool;
 83
 84	while (!list_empty(pool)) {
 85		if (!port->port.tty)
 86			break;
 87
 88		req = list_entry(pool->next, struct dbc_request, list_pool);
 89		list_del(&req->list_pool);
 90		req->length = DBC_MAX_PACKET;
 91
 92		spin_unlock(&port->port_lock);
 93		status = dbc_ep_queue(req);
 94		spin_lock(&port->port_lock);
 95
 96		if (status) {
 97			list_add(&req->list_pool, pool);
 98			break;
 99		}
100	}
101}
102
103static void
104dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
105{
106	unsigned long		flags;
107	struct dbc_port		*port = dbc_to_port(dbc);
108
109	spin_lock_irqsave(&port->port_lock, flags);
110	list_add_tail(&req->list_pool, &port->read_queue);
111	tasklet_schedule(&port->push);
112	spin_unlock_irqrestore(&port->port_lock, flags);
113}
114
115static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
116{
117	unsigned long		flags;
118	struct dbc_port		*port = dbc_to_port(dbc);
119
120	spin_lock_irqsave(&port->port_lock, flags);
121	list_add(&req->list_pool, &port->write_pool);
122	switch (req->status) {
123	case 0:
124		dbc_start_tx(port);
125		break;
126	case -ESHUTDOWN:
127		break;
128	default:
129		dev_warn(dbc->dev, "unexpected write complete status %d\n",
130			  req->status);
131		break;
132	}
133	spin_unlock_irqrestore(&port->port_lock, flags);
134}
135
136static void xhci_dbc_free_req(struct dbc_request *req)
137{
138	kfree(req->buf);
139	dbc_free_request(req);
140}
141
142static int
143xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
144			struct list_head *head,
145			void (*fn)(struct xhci_dbc *, struct dbc_request *))
146{
147	int			i;
148	struct dbc_request	*req;
149
150	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
151		req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
152		if (!req)
153			break;
154
155		req->length = DBC_MAX_PACKET;
156		req->buf = kmalloc(req->length, GFP_KERNEL);
157		if (!req->buf) {
158			dbc_free_request(req);
159			break;
160		}
161
162		req->complete = fn;
163		list_add_tail(&req->list_pool, head);
164	}
165
166	return list_empty(head) ? -ENOMEM : 0;
167}
168
169static void
170xhci_dbc_free_requests(struct list_head *head)
171{
172	struct dbc_request	*req;
173
174	while (!list_empty(head)) {
175		req = list_entry(head->next, struct dbc_request, list_pool);
176		list_del(&req->list_pool);
177		xhci_dbc_free_req(req);
178	}
179}
180
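/* Look up the dbc_port registered for this minor and bind it to the tty. */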
181static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
182{
183	struct dbc_port		*port;
184
185	mutex_lock(&dbc_tty_minors_lock);
186	port = idr_find(&dbc_tty_minors, tty->index);
187	mutex_unlock(&dbc_tty_minors_lock);
188
189	if (!port)
190		return -ENXIO;
191
192	tty->driver_data = port;
193
194	return tty_port_install(&port->port, driver, tty);
195}
196
197static int dbc_tty_open(struct tty_struct *tty, struct file *file)
198{
199	struct dbc_port		*port = tty->driver_data;
200
201	return tty_port_open(&port->port, tty, file);
202}
203
204static void dbc_tty_close(struct tty_struct *tty, struct file *file)
205{
206	struct dbc_port		*port = tty->driver_data;
207
208	tty_port_close(&port->port, tty, file);
209}
210
211static int dbc_tty_write(struct tty_struct *tty,
212			 const unsigned char *buf,
213			 int count)
214{
215	struct dbc_port		*port = tty->driver_data;
216	unsigned long		flags;
217
218	spin_lock_irqsave(&port->port_lock, flags);
219	if (count)
220		count = kfifo_in(&port->write_fifo, buf, count);
221	dbc_start_tx(port);
222	spin_unlock_irqrestore(&port->port_lock, flags);
223
224	return count;
225}
226
227static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
228{
229	struct dbc_port		*port = tty->driver_data;
230	unsigned long		flags;
231	int			status;
232
233	spin_lock_irqsave(&port->port_lock, flags);
234	status = kfifo_put(&port->write_fifo, ch);
235	spin_unlock_irqrestore(&port->port_lock, flags);
236
237	return status;
238}
239
240static void dbc_tty_flush_chars(struct tty_struct *tty)
241{
242	struct dbc_port		*port = tty->driver_data;
243	unsigned long		flags;
244
245	spin_lock_irqsave(&port->port_lock, flags);
246	dbc_start_tx(port);
247	spin_unlock_irqrestore(&port->port_lock, flags);
248}
249
250static unsigned int dbc_tty_write_room(struct tty_struct *tty)
251{
252	struct dbc_port		*port = tty->driver_data;
253	unsigned long		flags;
254	unsigned int		room;
255
256	spin_lock_irqsave(&port->port_lock, flags);
257	room = kfifo_avail(&port->write_fifo);
258	spin_unlock_irqrestore(&port->port_lock, flags);
259
260	return room;
261}
262
263static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
264{
265	struct dbc_port		*port = tty->driver_data;
266	unsigned long		flags;
267	unsigned int		chars;
268
269	spin_lock_irqsave(&port->port_lock, flags);
270	chars = kfifo_len(&port->write_fifo);
271	spin_unlock_irqrestore(&port->port_lock, flags);
272
273	return chars;
274}
275
276static void dbc_tty_unthrottle(struct tty_struct *tty)
277{
278	struct dbc_port		*port = tty->driver_data;
279	unsigned long		flags;
280
281	spin_lock_irqsave(&port->port_lock, flags);
282	tasklet_schedule(&port->push);
283	spin_unlock_irqrestore(&port->port_lock, flags);
284}
285
286static const struct tty_operations dbc_tty_ops = {
287	.install		= dbc_tty_install,
288	.open			= dbc_tty_open,
289	.close			= dbc_tty_close,
290	.write			= dbc_tty_write,
291	.put_char		= dbc_tty_put_char,
292	.flush_chars		= dbc_tty_flush_chars,
293	.write_room		= dbc_tty_write_room,
294	.chars_in_buffer	= dbc_tty_chars_in_buffer,
295	.unthrottle		= dbc_tty_unthrottle,
296};
297
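/*
 * Tasklet that pushes completed read requests from read_queue into the
 * tty flip buffer. If the tty is throttled or the flip buffer fills up,
 * processing stops; n_read records how much of a partially consumed
 * request has already been pushed so it can be resumed later.
 */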
298static void dbc_rx_push(struct tasklet_struct *t)
299{
300	struct dbc_request	*req;
301	struct tty_struct	*tty;
302	unsigned long		flags;
303	bool			do_push = false;
304	bool			disconnect = false;
305	struct dbc_port		*port = from_tasklet(port, t, push);
306	struct list_head	*queue = &port->read_queue;
307
308	spin_lock_irqsave(&port->port_lock, flags);
309	tty = port->port.tty;
310	while (!list_empty(queue)) {
311		req = list_first_entry(queue, struct dbc_request, list_pool);
312
313		if (tty && tty_throttled(tty))
314			break;
315
316		switch (req->status) {
317		case 0:
318			break;
319		case -ESHUTDOWN:
320			disconnect = true;
321			break;
322		default:
323			pr_warn("ttyDBC0: unexpected RX status %d\n",
324				req->status);
325			break;
326		}
327
328		if (req->actual) {
329			char		*packet = req->buf;
330			unsigned int	n, size = req->actual;
331			int		count;
332
333			n = port->n_read;
334			if (n) {
335				packet += n;
336				size -= n;
337			}
338
339			count = tty_insert_flip_string(&port->port, packet,
340						       size);
341			if (count)
342				do_push = true;
343			if (count != size) {
344				port->n_read += count;
345				break;
346			}
347			port->n_read = 0;
348		}
349
350		list_move(&req->list_pool, &port->read_pool);
351	}
352
353	if (do_push)
354		tty_flip_buffer_push(&port->port);
355
356	if (!list_empty(queue) && tty) {
357		if (!tty_throttled(tty)) {
358			if (do_push)
359				tasklet_schedule(&port->push);
360			else
361				pr_warn("ttyDBC0: RX not scheduled?\n");
362		}
363	}
364
365	if (!disconnect)
366		dbc_start_rx(port);
367
368	spin_unlock_irqrestore(&port->port_lock, flags);
369}
370
371static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
372{
373	unsigned long	flags;
374	struct dbc_port	*port = container_of(_port, struct dbc_port, port);
375
376	spin_lock_irqsave(&port->port_lock, flags);
377	dbc_start_rx(port);
378	spin_unlock_irqrestore(&port->port_lock, flags);
379
380	return 0;
381}
382
383static const struct tty_port_operations dbc_port_ops = {
384	.activate =	dbc_port_activate,
385};
386
387static void
388xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
389{
390	tty_port_init(&port->port);
391	spin_lock_init(&port->port_lock);
392	tasklet_setup(&port->push, dbc_rx_push);
393	INIT_LIST_HEAD(&port->read_pool);
394	INIT_LIST_HEAD(&port->read_queue);
395	INIT_LIST_HEAD(&port->write_pool);
396
397	port->port.ops =	&dbc_port_ops;
398	port->n_read =		0;
399}
400
401static void
402xhci_dbc_tty_exit_port(struct dbc_port *port)
403{
404	tasklet_kill(&port->push);
405	tty_port_destroy(&port->port);
406}
407
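/*
 * The DbC ->configure callback: allocate a tty minor, the write fifo and
 * the request pools, then register the tty device.
 */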
408static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
409{
410	int			ret;
411	struct device		*tty_dev;
412	struct dbc_port		*port = dbc_to_port(dbc);
413
414	if (port->registered)
415		return -EBUSY;
416
417	xhci_dbc_tty_init_port(dbc, port);
418
419	mutex_lock(&dbc_tty_minors_lock);
420	port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
421	mutex_unlock(&dbc_tty_minors_lock);
422
423	if (port->minor < 0) {
424		ret = port->minor;
425		goto err_idr;
426	}
427
428	ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
429	if (ret)
430		goto err_exit_port;
431
432	ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
433				      dbc_read_complete);
434	if (ret)
435		goto err_free_fifo;
436
437	ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
438				      dbc_write_complete);
439	if (ret)
440		goto err_free_requests;
441
442	tty_dev = tty_port_register_device(&port->port,
443					   dbc_tty_driver, port->minor, NULL);
444	if (IS_ERR(tty_dev)) {
445		ret = PTR_ERR(tty_dev);
446		goto err_free_requests;
447	}
448
449	port->registered = true;
450
451	return 0;
452
453err_free_requests:
454	xhci_dbc_free_requests(&port->read_pool);
455	xhci_dbc_free_requests(&port->write_pool);
456err_free_fifo:
457	kfifo_free(&port->write_fifo);
458err_exit_port:
459	idr_remove(&dbc_tty_minors, port->minor);
460err_idr:
461	xhci_dbc_tty_exit_port(port);
462
463	dev_err(dbc->dev, "can't register tty port, err %d\n", ret);
464
465	return ret;
466}
467
468static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
469{
470	struct dbc_port		*port = dbc_to_port(dbc);
471
472	if (!port->registered)
473		return;
474	tty_unregister_device(dbc_tty_driver, port->minor);
475	xhci_dbc_tty_exit_port(port);
476	port->registered = false;
477
478	mutex_lock(&dbc_tty_minors_lock);
479	idr_remove(&dbc_tty_minors, port->minor);
480	mutex_unlock(&dbc_tty_minors_lock);
481
482	kfifo_free(&port->write_fifo);
483	xhci_dbc_free_requests(&port->read_pool);
484	xhci_dbc_free_requests(&port->read_queue);
485	xhci_dbc_free_requests(&port->write_pool);
486}
487
488static const struct dbc_driver dbc_driver = {
489	.configure		= xhci_dbc_tty_register_device,
490	.disconnect		= xhci_dbc_tty_unregister_device,
491};
492
493int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
494{
495	struct xhci_dbc		*dbc;
496	struct dbc_port		*port;
497	int			status;
498
499	if (!dbc_tty_driver)
500		return -ENODEV;
501
502	port = kzalloc(sizeof(*port), GFP_KERNEL);
503	if (!port)
504		return -ENOMEM;
505
506	dbc = xhci_alloc_dbc(dev, base, &dbc_driver);
507
508	if (!dbc) {
509		status = -ENOMEM;
510		goto out2;
511	}
512
513	dbc->priv = port;
514
515	/* get rid of xhci once this is a real driver binding to a device */
516	xhci->dbc = dbc;
517
518	return 0;
519out2:
520	kfree(port);
521
522	return status;
523}
524
525/*
526 * undo what probe did, assume dbc is stopped already.
527 * we also assume tty_unregister_device() is called before this
528 */
529void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
530{
531	struct dbc_port         *port = dbc_to_port(dbc);
532
533	xhci_dbc_remove(dbc);
534	kfree(port);
535}
536
537int dbc_tty_init(void)
538{
539	int		ret;
540
541	idr_init(&dbc_tty_minors);
542
543	dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
544					  TTY_DRIVER_DYNAMIC_DEV);
545	if (IS_ERR(dbc_tty_driver)) {
546		idr_destroy(&dbc_tty_minors);
547		return PTR_ERR(dbc_tty_driver);
548	}
549
550	dbc_tty_driver->driver_name = "dbc_serial";
551	dbc_tty_driver->name = "ttyDBC";
552
553	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
554	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
555	dbc_tty_driver->init_termios = tty_std_termios;
556	dbc_tty_driver->init_termios.c_cflag =
557			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
558	dbc_tty_driver->init_termios.c_ispeed = 9600;
559	dbc_tty_driver->init_termios.c_ospeed = 9600;
560
561	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);
562
563	ret = tty_register_driver(dbc_tty_driver);
564	if (ret) {
565		pr_err("Can't register dbc tty driver\n");
566		tty_driver_kref_put(dbc_tty_driver);
567		idr_destroy(&dbc_tty_minors);
568	}
569
570	return ret;
571}
572
573void dbc_tty_exit(void)
574{
575	if (dbc_tty_driver) {
576		tty_unregister_driver(dbc_tty_driver);
577		tty_driver_kref_put(dbc_tty_driver);
578		dbc_tty_driver = NULL;
579	}
580
581	idr_destroy(&dbc_tty_minors);
582}
xhci-dbgtty.c as shipped in kernel v6.13.7. The main differences from the v6.2 listing above: the write path now uses the tty_port's xmit_fifo via dbc_kfifo_to_req(), with a tx_boundary so that each tty write is turned into its own transfer (replacing the private write_fifo and dbc_send_packet()); dbc_tty_write() and dbc_tty_put_char() follow the updated tty_operations prototypes (ssize_t/size_t and u8); dbc_tty_write_room() reports zero room while a previous write is still being turned into TRBs; and dbc_rx_push() uses list_move_tail().
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * xhci-dbgtty.c - tty glue for xHCI debug capability
  4 *
  5 * Copyright (C) 2017 Intel Corporation
  6 *
  7 * Author: Lu Baolu <baolu.lu@linux.intel.com>
  8 */
  9
 10#include <linux/slab.h>
 11#include <linux/tty.h>
 12#include <linux/tty_flip.h>
 13#include <linux/idr.h>
 14
 15#include "xhci.h"
 16#include "xhci-dbgcap.h"
 17
 18static struct tty_driver *dbc_tty_driver;
 19static struct idr dbc_tty_minors;
 20static DEFINE_MUTEX(dbc_tty_minors_lock);
 21
 22static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
 23{
 24	return dbc->priv;
 25}
 26
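/*
 * Copy up to DBC_MAX_PACKET bytes from the tty xmit fifo into @packet.
 * tx_boundary caps the copy so that data queued by one tty write is not
 * merged into the same transfer as data from a later write.
 */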
 27static unsigned int
 28dbc_kfifo_to_req(struct dbc_port *port, char *packet)
 29{
 30	unsigned int	len;
 31
 32	len = kfifo_len(&port->port.xmit_fifo);
 33
 34	if (len == 0)
 35		return 0;
 36
 37	len = min(len, DBC_MAX_PACKET);
 38
 39	if (port->tx_boundary)
 40		len = min(port->tx_boundary, len);
 41
 42	len = kfifo_out(&port->port.xmit_fifo, packet, len);
 43
 44	if (port->tx_boundary)
 45		port->tx_boundary -= len;
 46
 47	return len;
 48}
 49
 50static int dbc_start_tx(struct dbc_port *port)
 51	__releases(&port->port_lock)
 52	__acquires(&port->port_lock)
 53{
 54	int			len;
 55	struct dbc_request	*req;
 56	int			status = 0;
 57	bool			do_tty_wake = false;
 58	struct list_head	*pool = &port->write_pool;
 59
 60	while (!list_empty(pool)) {
 61		req = list_entry(pool->next, struct dbc_request, list_pool);
 62		len = dbc_kfifo_to_req(port, req->buf);
 63		if (len == 0)
 64			break;
 65		do_tty_wake = true;
 66
 67		req->length = len;
 68		list_del(&req->list_pool);
 69
 70		spin_unlock(&port->port_lock);
 71		status = dbc_ep_queue(req);
 72		spin_lock(&port->port_lock);
 73
 74		if (status) {
 75			list_add(&req->list_pool, pool);
 76			break;
 77		}
 78	}
 79
 80	if (do_tty_wake && port->port.tty)
 81		tty_wakeup(port->port.tty);
 82
 83	return status;
 84}
 85
 86static void dbc_start_rx(struct dbc_port *port)
 87	__releases(&port->port_lock)
 88	__acquires(&port->port_lock)
 89{
 90	struct dbc_request	*req;
 91	int			status;
 92	struct list_head	*pool = &port->read_pool;
 93
 94	while (!list_empty(pool)) {
 95		if (!port->port.tty)
 96			break;
 97
 98		req = list_entry(pool->next, struct dbc_request, list_pool);
 99		list_del(&req->list_pool);
100		req->length = DBC_MAX_PACKET;
101
102		spin_unlock(&port->port_lock);
103		status = dbc_ep_queue(req);
104		spin_lock(&port->port_lock);
105
106		if (status) {
107			list_add(&req->list_pool, pool);
108			break;
109		}
110	}
111}
112
113static void
114dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
115{
116	unsigned long		flags;
117	struct dbc_port		*port = dbc_to_port(dbc);
118
119	spin_lock_irqsave(&port->port_lock, flags);
120	list_add_tail(&req->list_pool, &port->read_queue);
121	tasklet_schedule(&port->push);
122	spin_unlock_irqrestore(&port->port_lock, flags);
123}
124
125static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
126{
127	unsigned long		flags;
128	struct dbc_port		*port = dbc_to_port(dbc);
129
130	spin_lock_irqsave(&port->port_lock, flags);
131	list_add(&req->list_pool, &port->write_pool);
132	switch (req->status) {
133	case 0:
134		dbc_start_tx(port);
135		break;
136	case -ESHUTDOWN:
137		break;
138	default:
139		dev_warn(dbc->dev, "unexpected write complete status %d\n",
140			  req->status);
141		break;
142	}
143	spin_unlock_irqrestore(&port->port_lock, flags);
144}
145
146static void xhci_dbc_free_req(struct dbc_request *req)
147{
148	kfree(req->buf);
149	dbc_free_request(req);
150}
151
152static int
153xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
154			struct list_head *head,
155			void (*fn)(struct xhci_dbc *, struct dbc_request *))
156{
157	int			i;
158	struct dbc_request	*req;
159
160	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
161		req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
162		if (!req)
163			break;
164
165		req->length = DBC_MAX_PACKET;
166		req->buf = kmalloc(req->length, GFP_KERNEL);
167		if (!req->buf) {
168			dbc_free_request(req);
169			break;
170		}
171
172		req->complete = fn;
173		list_add_tail(&req->list_pool, head);
174	}
175
176	return list_empty(head) ? -ENOMEM : 0;
177}
178
179static void
180xhci_dbc_free_requests(struct list_head *head)
181{
182	struct dbc_request	*req;
183
184	while (!list_empty(head)) {
185		req = list_entry(head->next, struct dbc_request, list_pool);
186		list_del(&req->list_pool);
187		xhci_dbc_free_req(req);
188	}
189}
190
191static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
192{
193	struct dbc_port		*port;
194
195	mutex_lock(&dbc_tty_minors_lock);
196	port = idr_find(&dbc_tty_minors, tty->index);
197	mutex_unlock(&dbc_tty_minors_lock);
198
199	if (!port)
200		return -ENXIO;
201
202	tty->driver_data = port;
203
204	return tty_port_install(&port->port, driver, tty);
205}
206
207static int dbc_tty_open(struct tty_struct *tty, struct file *file)
208{
209	struct dbc_port		*port = tty->driver_data;
210
211	return tty_port_open(&port->port, tty, file);
212}
213
214static void dbc_tty_close(struct tty_struct *tty, struct file *file)
215{
216	struct dbc_port		*port = tty->driver_data;
217
218	tty_port_close(&port->port, tty, file);
219}
220
221static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
222			     size_t count)
223{
224	struct dbc_port		*port = tty->driver_data;
225	unsigned long		flags;
226	unsigned int		written = 0;
227
228	spin_lock_irqsave(&port->port_lock, flags);
229
230	/*
231	 * Treat tty write as one usb transfer. Make sure the writes are turned
232	 * into TRB request having the same size boundaries as the tty writes.
233	 * Don't add data to kfifo before previous write is turned into TRBs
234	 */
235	if (port->tx_boundary) {
236		spin_unlock_irqrestore(&port->port_lock, flags);
237		return 0;
238	}
239
240	if (count) {
241		written = kfifo_in(&port->port.xmit_fifo, buf, count);
242
243		if (written == count)
244			port->tx_boundary = kfifo_len(&port->port.xmit_fifo);
245
246		dbc_start_tx(port);
247	}
248
249	spin_unlock_irqrestore(&port->port_lock, flags);
250
251	return written;
252}
253
254static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
255{
256	struct dbc_port		*port = tty->driver_data;
257	unsigned long		flags;
258	int			status;
259
260	spin_lock_irqsave(&port->port_lock, flags);
261	status = kfifo_put(&port->port.xmit_fifo, ch);
262	spin_unlock_irqrestore(&port->port_lock, flags);
263
264	return status;
265}
266
267static void dbc_tty_flush_chars(struct tty_struct *tty)
268{
269	struct dbc_port		*port = tty->driver_data;
270	unsigned long		flags;
271
272	spin_lock_irqsave(&port->port_lock, flags);
273	dbc_start_tx(port);
274	spin_unlock_irqrestore(&port->port_lock, flags);
275}
276
277static unsigned int dbc_tty_write_room(struct tty_struct *tty)
278{
279	struct dbc_port		*port = tty->driver_data;
280	unsigned long		flags;
281	unsigned int		room;
282
283	spin_lock_irqsave(&port->port_lock, flags);
284	room = kfifo_avail(&port->port.xmit_fifo);
285
286	if (port->tx_boundary)
287		room = 0;
288
289	spin_unlock_irqrestore(&port->port_lock, flags);
290
291	return room;
292}
293
294static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
295{
296	struct dbc_port		*port = tty->driver_data;
297	unsigned long		flags;
298	unsigned int		chars;
299
300	spin_lock_irqsave(&port->port_lock, flags);
301	chars = kfifo_len(&port->port.xmit_fifo);
302	spin_unlock_irqrestore(&port->port_lock, flags);
303
304	return chars;
305}
306
307static void dbc_tty_unthrottle(struct tty_struct *tty)
308{
309	struct dbc_port		*port = tty->driver_data;
310	unsigned long		flags;
311
312	spin_lock_irqsave(&port->port_lock, flags);
313	tasklet_schedule(&port->push);
314	spin_unlock_irqrestore(&port->port_lock, flags);
315}
316
317static const struct tty_operations dbc_tty_ops = {
318	.install		= dbc_tty_install,
319	.open			= dbc_tty_open,
320	.close			= dbc_tty_close,
321	.write			= dbc_tty_write,
322	.put_char		= dbc_tty_put_char,
323	.flush_chars		= dbc_tty_flush_chars,
324	.write_room		= dbc_tty_write_room,
325	.chars_in_buffer	= dbc_tty_chars_in_buffer,
326	.unthrottle		= dbc_tty_unthrottle,
327};
328
329static void dbc_rx_push(struct tasklet_struct *t)
330{
331	struct dbc_request	*req;
332	struct tty_struct	*tty;
333	unsigned long		flags;
334	bool			do_push = false;
335	bool			disconnect = false;
336	struct dbc_port		*port = from_tasklet(port, t, push);
337	struct list_head	*queue = &port->read_queue;
338
339	spin_lock_irqsave(&port->port_lock, flags);
340	tty = port->port.tty;
341	while (!list_empty(queue)) {
342		req = list_first_entry(queue, struct dbc_request, list_pool);
343
344		if (tty && tty_throttled(tty))
345			break;
346
347		switch (req->status) {
348		case 0:
349			break;
350		case -ESHUTDOWN:
351			disconnect = true;
352			break;
353		default:
354			pr_warn("ttyDBC0: unexpected RX status %d\n",
355				req->status);
356			break;
357		}
358
359		if (req->actual) {
360			char		*packet = req->buf;
361			unsigned int	n, size = req->actual;
362			int		count;
363
364			n = port->n_read;
365			if (n) {
366				packet += n;
367				size -= n;
368			}
369
370			count = tty_insert_flip_string(&port->port, packet,
371						       size);
372			if (count)
373				do_push = true;
374			if (count != size) {
375				port->n_read += count;
376				break;
377			}
378			port->n_read = 0;
379		}
380
381		list_move_tail(&req->list_pool, &port->read_pool);
382	}
383
384	if (do_push)
385		tty_flip_buffer_push(&port->port);
386
387	if (!list_empty(queue) && tty) {
388		if (!tty_throttled(tty)) {
389			if (do_push)
390				tasklet_schedule(&port->push);
391			else
392				pr_warn("ttyDBC0: RX not scheduled?\n");
393		}
394	}
395
396	if (!disconnect)
397		dbc_start_rx(port);
398
399	spin_unlock_irqrestore(&port->port_lock, flags);
400}
401
402static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
403{
404	unsigned long	flags;
405	struct dbc_port	*port = container_of(_port, struct dbc_port, port);
406
407	spin_lock_irqsave(&port->port_lock, flags);
408	dbc_start_rx(port);
409	spin_unlock_irqrestore(&port->port_lock, flags);
410
411	return 0;
412}
413
414static const struct tty_port_operations dbc_port_ops = {
415	.activate =	dbc_port_activate,
416};
417
418static void
419xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
420{
421	tty_port_init(&port->port);
422	spin_lock_init(&port->port_lock);
423	tasklet_setup(&port->push, dbc_rx_push);
424	INIT_LIST_HEAD(&port->read_pool);
425	INIT_LIST_HEAD(&port->read_queue);
426	INIT_LIST_HEAD(&port->write_pool);
427
428	port->port.ops =	&dbc_port_ops;
429	port->n_read =		0;
430}
431
432static void
433xhci_dbc_tty_exit_port(struct dbc_port *port)
434{
435	tasklet_kill(&port->push);
436	tty_port_destroy(&port->port);
437}
438
439static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
440{
441	int			ret;
442	struct device		*tty_dev;
443	struct dbc_port		*port = dbc_to_port(dbc);
444
445	if (port->registered)
446		return -EBUSY;
447
448	xhci_dbc_tty_init_port(dbc, port);
449
450	mutex_lock(&dbc_tty_minors_lock);
451	port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
452	mutex_unlock(&dbc_tty_minors_lock);
453
454	if (port->minor < 0) {
455		ret = port->minor;
456		goto err_idr;
457	}
458
459	ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
460			  GFP_KERNEL);
461	if (ret)
462		goto err_exit_port;
463
464	ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
465				      dbc_read_complete);
466	if (ret)
467		goto err_free_fifo;
468
469	ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
470				      dbc_write_complete);
471	if (ret)
472		goto err_free_requests;
473
474	tty_dev = tty_port_register_device(&port->port,
475					   dbc_tty_driver, port->minor, NULL);
476	if (IS_ERR(tty_dev)) {
477		ret = PTR_ERR(tty_dev);
478		goto err_free_requests;
479	}
480
481	port->registered = true;
482
483	return 0;
484
485err_free_requests:
486	xhci_dbc_free_requests(&port->read_pool);
487	xhci_dbc_free_requests(&port->write_pool);
488err_free_fifo:
489	kfifo_free(&port->port.xmit_fifo);
490err_exit_port:
491	idr_remove(&dbc_tty_minors, port->minor);
492err_idr:
493	xhci_dbc_tty_exit_port(port);
494
495	dev_err(dbc->dev, "can't register tty port, err %d\n", ret);
496
497	return ret;
498}
499
500static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
501{
502	struct dbc_port		*port = dbc_to_port(dbc);
503
504	if (!port->registered)
505		return;
506	tty_unregister_device(dbc_tty_driver, port->minor);
507	xhci_dbc_tty_exit_port(port);
508	port->registered = false;
509
510	mutex_lock(&dbc_tty_minors_lock);
511	idr_remove(&dbc_tty_minors, port->minor);
512	mutex_unlock(&dbc_tty_minors_lock);
513
514	kfifo_free(&port->port.xmit_fifo);
515	xhci_dbc_free_requests(&port->read_pool);
516	xhci_dbc_free_requests(&port->read_queue);
517	xhci_dbc_free_requests(&port->write_pool);
518}
519
520static const struct dbc_driver dbc_driver = {
521	.configure		= xhci_dbc_tty_register_device,
522	.disconnect		= xhci_dbc_tty_unregister_device,
523};
524
525int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
526{
527	struct xhci_dbc		*dbc;
528	struct dbc_port		*port;
529	int			status;
530
531	if (!dbc_tty_driver)
532		return -ENODEV;
533
534	port = kzalloc(sizeof(*port), GFP_KERNEL);
535	if (!port)
536		return -ENOMEM;
537
538	dbc = xhci_alloc_dbc(dev, base, &dbc_driver);
539
540	if (!dbc) {
541		status = -ENOMEM;
542		goto out2;
543	}
544
545	dbc->priv = port;
546
547	/* get rid of xhci once this is a real driver binding to a device */
548	xhci->dbc = dbc;
549
550	return 0;
551out2:
552	kfree(port);
553
554	return status;
555}
556
557/*
558 * undo what probe did, assume dbc is stopped already.
559 * we also assume tty_unregister_device() is called before this
560 */
561void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
562{
563	struct dbc_port         *port = dbc_to_port(dbc);
564
565	xhci_dbc_remove(dbc);
566	kfree(port);
567}
568
569int dbc_tty_init(void)
570{
571	int		ret;
572
573	idr_init(&dbc_tty_minors);
574
575	dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
576					  TTY_DRIVER_DYNAMIC_DEV);
577	if (IS_ERR(dbc_tty_driver)) {
578		idr_destroy(&dbc_tty_minors);
579		return PTR_ERR(dbc_tty_driver);
580	}
581
582	dbc_tty_driver->driver_name = "dbc_serial";
583	dbc_tty_driver->name = "ttyDBC";
584
585	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
586	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
587	dbc_tty_driver->init_termios = tty_std_termios;
588	dbc_tty_driver->init_termios.c_cflag =
589			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
590	dbc_tty_driver->init_termios.c_ispeed = 9600;
591	dbc_tty_driver->init_termios.c_ospeed = 9600;
592
593	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);
594
595	ret = tty_register_driver(dbc_tty_driver);
596	if (ret) {
597		pr_err("Can't register dbc tty driver\n");
598		tty_driver_kref_put(dbc_tty_driver);
599		idr_destroy(&dbc_tty_minors);
600	}
601
602	return ret;
603}
604
605void dbc_tty_exit(void)
606{
607	if (dbc_tty_driver) {
608		tty_unregister_driver(dbc_tty_driver);
609		tty_driver_kref_put(dbc_tty_driver);
610		dbc_tty_driver = NULL;
611	}
612
613	idr_destroy(&dbc_tty_minors);
614}