v3.5.6
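The listing below is the User-Mode Linux (UML) "port" channel driver as it appears in kernel v3.5.6. It lets UML consoles be reached over a host port: the driver listens on the requested port, hands each incoming connection to a telnetd helper process, and passes the resulting file descriptor to whichever console is blocked in port_wait(). A second copy of the same file, from v6.13.7, follows after this listing.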
 
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 * Licensed under the GPL
 */

#include "linux/completion.h"
#include "linux/interrupt.h"
#include "linux/list.h"
#include "linux/mutex.h"
#include "linux/slab.h"
#include "linux/workqueue.h"
#include "asm/atomic.h"
#include "init.h"
#include "irq_kern.h"
#include "os.h"
#include "port.h"

struct port_list {
	struct list_head list;
	atomic_t wait_count;
	int has_connection;
	struct completion done;
	int port;
	int fd;
	spinlock_t lock;
	struct list_head pending;
	struct list_head connections;
};

struct port_dev {
	struct port_list *port;
	int helper_pid;
	int telnetd_pid;
};

struct connection {
	struct list_head list;
	int fd;
	int helper_pid;
	int socket[2];
	int telnetd_pid;
	struct port_list *port;
};

static irqreturn_t pipe_interrupt(int irq, void *data)
{
	struct connection *conn = data;
	int fd;

	fd = os_rcv_fd(conn->socket[0], &conn->helper_pid);
	if (fd < 0) {
		if (fd == -EAGAIN)
			return IRQ_NONE;

		printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n",
		       -fd);
		os_close_file(conn->fd);
	}

	list_del(&conn->list);

	conn->fd = fd;
	list_add(&conn->list, &conn->port->connections);

	complete(&conn->port->done);
	return IRQ_HANDLED;
}

#define NO_WAITER_MSG \
    "****\n" \
    "There are currently no UML consoles waiting for port connections.\n" \
    "Either disconnect from one to make it available or activate some more\n" \
    "by enabling more consoles in the UML /etc/inittab.\n" \
    "****\n"

static int port_accept(struct port_list *port)
{
	struct connection *conn;
	int fd, socket[2], pid;

	fd = port_connection(port->fd, socket, &pid);
	if (fd < 0) {
		if (fd != -EAGAIN)
			printk(KERN_ERR "port_accept : port_connection "
			       "returned %d\n", -fd);
		goto out;
	}

	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (conn == NULL) {
		printk(KERN_ERR "port_accept : failed to allocate "
		       "connection\n");
		goto out_close;
	}
	*conn = ((struct connection)
		{ .list		= LIST_HEAD_INIT(conn->list),
		  .fd		= fd,
		  .socket	= { socket[0], socket[1] },
		  .telnetd_pid	= pid,
		  .port		= port });

	if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			  "telnetd", conn)) {
		printk(KERN_ERR "port_accept : failed to get IRQ for "
		       "telnetd\n");
		goto out_free;
	}

	if (atomic_read(&port->wait_count) == 0) {
		os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
		printk(KERN_ERR "No one waiting for port\n");
	}
	list_add(&conn->list, &port->pending);
	return 1;

 out_free:
	kfree(conn);
 out_close:
	os_close_file(fd);
	os_kill_process(pid, 1);
 out:
	return 0;
}

static DEFINE_MUTEX(ports_mutex);
static LIST_HEAD(ports);

static void port_work_proc(struct work_struct *unused)
{
	struct port_list *port;
	struct list_head *ele;
	unsigned long flags;

	local_irq_save(flags);
	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		if (!port->has_connection)
			continue;

		reactivate_fd(port->fd, ACCEPT_IRQ);
		while (port_accept(port))
			;
		port->has_connection = 0;
	}
	local_irq_restore(flags);
}

DECLARE_WORK(port_work, port_work_proc);

static irqreturn_t port_interrupt(int irq, void *data)
{
	struct port_list *port = data;

	port->has_connection = 1;
	schedule_work(&port_work);
	return IRQ_HANDLED;
}

void *port_data(int port_num)
{
	struct list_head *ele;
	struct port_list *port;
	struct port_dev *dev = NULL;
	int fd;

	mutex_lock(&ports_mutex);
	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		if (port->port == port_num)
			goto found;
	}
	port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
	if (port == NULL) {
		printk(KERN_ERR "Allocation of port list failed\n");
		goto out;
	}

	fd = port_listen_fd(port_num);
	if (fd < 0) {
		printk(KERN_ERR "binding to port %d failed, errno = %d\n",
		       port_num, -fd);
		goto out_free;
	}

	if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			  "port", port)) {
		printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
		goto out_close;
	}

	*port = ((struct port_list)
		{ .list			= LIST_HEAD_INIT(port->list),
		  .wait_count		= ATOMIC_INIT(0),
		  .has_connection	= 0,
		  .port			= port_num,
		  .fd			= fd,
		  .pending		= LIST_HEAD_INIT(port->pending),
		  .connections		= LIST_HEAD_INIT(port->connections) });
	spin_lock_init(&port->lock);
	init_completion(&port->done);
	list_add(&port->list, &ports);

 found:
	dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
	if (dev == NULL) {
		printk(KERN_ERR "Allocation of port device entry failed\n");
		goto out;
	}

	*dev = ((struct port_dev) { .port		= port,
				    .helper_pid		= -1,
				    .telnetd_pid	= -1 });
	goto out;

 out_close:
	os_close_file(fd);
 out_free:
	kfree(port);
 out:
	mutex_unlock(&ports_mutex);
	return dev;
}

int port_wait(void *data)
{
	struct port_dev *dev = data;
	struct connection *conn;
	struct port_list *port = dev->port;
	int fd;

	atomic_inc(&port->wait_count);
	while (1) {
		fd = -ERESTARTSYS;
		if (wait_for_completion_interruptible(&port->done))
			goto out;

		spin_lock(&port->lock);

		conn = list_entry(port->connections.next, struct connection,
				  list);
		list_del(&conn->list);
		spin_unlock(&port->lock);

		os_shutdown_socket(conn->socket[0], 1, 1);
		os_close_file(conn->socket[0]);
		os_shutdown_socket(conn->socket[1], 1, 1);
		os_close_file(conn->socket[1]);

		/* This is done here because freeing an IRQ can't be done
		 * within the IRQ handler.  So, pipe_interrupt always ups
		 * the semaphore regardless of whether it got a successful
		 * connection.  Then we loop here throwing out failed
		 * connections until a good one is found.
		 */
		um_free_irq(TELNETD_IRQ, conn);

		if (conn->fd >= 0)
			break;
		os_close_file(conn->fd);
		kfree(conn);
	}

	fd = conn->fd;
	dev->helper_pid = conn->helper_pid;
	dev->telnetd_pid = conn->telnetd_pid;
	kfree(conn);
 out:
	atomic_dec(&port->wait_count);
	return fd;
}

void port_remove_dev(void *d)
{
	struct port_dev *dev = d;

	if (dev->helper_pid != -1)
		os_kill_process(dev->helper_pid, 0);
	if (dev->telnetd_pid != -1)
		os_kill_process(dev->telnetd_pid, 1);
	dev->helper_pid = -1;
	dev->telnetd_pid = -1;
}

void port_kern_free(void *d)
{
	struct port_dev *dev = d;

	port_remove_dev(dev);
	kfree(dev);
}

static void free_port(void)
{
	struct list_head *ele;
	struct port_list *port;

	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		free_irq_by_fd(port->fd);
		os_close_file(port->fd);
	}
}

__uml_exitcall(free_port);

v6.13.7
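The same file as of v6.13.7. Relative to the v3.5.6 copy above, the visible changes are: an SPDX license identifier replaces the "Licensed under the GPL" comment, the kernel headers are included with angle brackets, pipe_interrupt() receives the connection descriptor via os_rcv_fd_msg() instead of os_rcv_fd(), the IRQF_SAMPLE_RANDOM flag is gone and the um_request_irq() return value is checked for < 0, port_work_proc() no longer calls reactivate_fd(), and the port_work declaration is now static.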
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 */

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <init.h>
#include <irq_kern.h>
#include <os.h>
#include "port.h"

struct port_list {
	struct list_head list;
	atomic_t wait_count;
	int has_connection;
	struct completion done;
	int port;
	int fd;
	spinlock_t lock;
	struct list_head pending;
	struct list_head connections;
};

struct port_dev {
	struct port_list *port;
	int helper_pid;
	int telnetd_pid;
};

struct connection {
	struct list_head list;
	int fd;
	int helper_pid;
	int socket[2];
	int telnetd_pid;
	struct port_list *port;
};

static irqreturn_t pipe_interrupt(int irq, void *data)
{
	struct connection *conn = data;
	int n_fds = 1, fd = -1;
	ssize_t ret;

	ret = os_rcv_fd_msg(conn->socket[0], &fd, n_fds, &conn->helper_pid,
			    sizeof(conn->helper_pid));
	if (ret != sizeof(conn->helper_pid)) {
		if (ret == -EAGAIN)
			return IRQ_NONE;

		printk(KERN_ERR "pipe_interrupt : os_rcv_fd_msg returned %zd\n",
		       ret);
		os_close_file(conn->fd);
	}

	list_del(&conn->list);

	conn->fd = fd;
	list_add(&conn->list, &conn->port->connections);

	complete(&conn->port->done);
	return IRQ_HANDLED;
}

#define NO_WAITER_MSG \
    "****\n" \
    "There are currently no UML consoles waiting for port connections.\n" \
    "Either disconnect from one to make it available or activate some more\n" \
    "by enabling more consoles in the UML /etc/inittab.\n" \
    "****\n"

static int port_accept(struct port_list *port)
{
	struct connection *conn;
	int fd, socket[2], pid;

	fd = port_connection(port->fd, socket, &pid);
	if (fd < 0) {
		if (fd != -EAGAIN)
			printk(KERN_ERR "port_accept : port_connection "
			       "returned %d\n", -fd);
		goto out;
	}

	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (conn == NULL) {
		printk(KERN_ERR "port_accept : failed to allocate "
		       "connection\n");
		goto out_close;
	}
	*conn = ((struct connection)
		{ .list		= LIST_HEAD_INIT(conn->list),
		  .fd		= fd,
		  .socket	= { socket[0], socket[1] },
		  .telnetd_pid	= pid,
		  .port		= port });

	if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
			  IRQF_SHARED, "telnetd", conn) < 0) {
		printk(KERN_ERR "port_accept : failed to get IRQ for "
		       "telnetd\n");
		goto out_free;
	}

	if (atomic_read(&port->wait_count) == 0) {
		os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
		printk(KERN_ERR "No one waiting for port\n");
	}
	list_add(&conn->list, &port->pending);
	return 1;

 out_free:
	kfree(conn);
 out_close:
	os_close_file(fd);
	os_kill_process(pid, 1);
 out:
	return 0;
}

static DEFINE_MUTEX(ports_mutex);
static LIST_HEAD(ports);

static void port_work_proc(struct work_struct *unused)
{
	struct port_list *port;
	struct list_head *ele;
	unsigned long flags;

	local_irq_save(flags);
	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		if (!port->has_connection)
			continue;

		while (port_accept(port))
			;
		port->has_connection = 0;
	}
	local_irq_restore(flags);
}

static DECLARE_WORK(port_work, port_work_proc);

static irqreturn_t port_interrupt(int irq, void *data)
{
	struct port_list *port = data;

	port->has_connection = 1;
	schedule_work(&port_work);
	return IRQ_HANDLED;
}

void *port_data(int port_num)
{
	struct list_head *ele;
	struct port_list *port;
	struct port_dev *dev = NULL;
	int fd;

	mutex_lock(&ports_mutex);
	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		if (port->port == port_num)
			goto found;
	}
	port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
	if (port == NULL) {
		printk(KERN_ERR "Allocation of port list failed\n");
		goto out;
	}

	fd = port_listen_fd(port_num);
	if (fd < 0) {
		printk(KERN_ERR "binding to port %d failed, errno = %d\n",
		       port_num, -fd);
		goto out_free;
	}

	if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
			  IRQF_SHARED, "port", port) < 0) {
		printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
		goto out_close;
	}

	*port = ((struct port_list)
		{ .list			= LIST_HEAD_INIT(port->list),
		  .wait_count		= ATOMIC_INIT(0),
		  .has_connection	= 0,
		  .port			= port_num,
		  .fd			= fd,
		  .pending		= LIST_HEAD_INIT(port->pending),
		  .connections		= LIST_HEAD_INIT(port->connections) });
	spin_lock_init(&port->lock);
	init_completion(&port->done);
	list_add(&port->list, &ports);

 found:
	dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
	if (dev == NULL) {
		printk(KERN_ERR "Allocation of port device entry failed\n");
		goto out;
	}

	*dev = ((struct port_dev) { .port		= port,
				    .helper_pid		= -1,
				    .telnetd_pid	= -1 });
	goto out;

 out_close:
	os_close_file(fd);
 out_free:
	kfree(port);
 out:
	mutex_unlock(&ports_mutex);
	return dev;
}

int port_wait(void *data)
{
	struct port_dev *dev = data;
	struct connection *conn;
	struct port_list *port = dev->port;
	int fd;

	atomic_inc(&port->wait_count);
	while (1) {
		fd = -ERESTARTSYS;
		if (wait_for_completion_interruptible(&port->done))
			goto out;

		spin_lock(&port->lock);

		conn = list_entry(port->connections.next, struct connection,
				  list);
		list_del(&conn->list);
		spin_unlock(&port->lock);

		os_shutdown_socket(conn->socket[0], 1, 1);
		os_close_file(conn->socket[0]);
		os_shutdown_socket(conn->socket[1], 1, 1);
		os_close_file(conn->socket[1]);

		/* This is done here because freeing an IRQ can't be done
		 * within the IRQ handler.  So, pipe_interrupt always ups
		 * the semaphore regardless of whether it got a successful
		 * connection.  Then we loop here throwing out failed
		 * connections until a good one is found.
		 */
		um_free_irq(TELNETD_IRQ, conn);

		if (conn->fd >= 0)
			break;
		os_close_file(conn->fd);
		kfree(conn);
	}

	fd = conn->fd;
	dev->helper_pid = conn->helper_pid;
	dev->telnetd_pid = conn->telnetd_pid;
	kfree(conn);
 out:
	atomic_dec(&port->wait_count);
	return fd;
}

void port_remove_dev(void *d)
{
	struct port_dev *dev = d;

	if (dev->helper_pid != -1)
		os_kill_process(dev->helper_pid, 0);
	if (dev->telnetd_pid != -1)
		os_kill_process(dev->telnetd_pid, 1);
	dev->helper_pid = -1;
	dev->telnetd_pid = -1;
}

void port_kern_free(void *d)
{
	struct port_dev *dev = d;

	port_remove_dev(dev);
	kfree(dev);
}

static void free_port(void)
{
	struct list_head *ele;
	struct port_list *port;

	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		free_irq_by_fd(port->fd);
		os_close_file(port->fd);
	}
}

__uml_exitcall(free_port);
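
For orientation, the sketch below shows how a caller might drive the interface this file exports (port_data(), port_wait(), port_remove_dev(), port_kern_free()). It is a minimal illustration written for this page, not code from the kernel tree: the wrapper name, its error handling, and the assumption that "port.h" declares these functions are all hypothetical.

#include <linux/errno.h>

#include "port.h"	/* assumed to declare port_data(), port_wait(), ... */

/*
 * Hypothetical helper: attach a console to host port 'port_num' and
 * return the host-side fd of the first accepted connection.
 */
static int example_attach_console(int port_num, void **dev_out)
{
	void *dev;
	int fd;

	/* Find or create the listener state for this port number. */
	dev = port_data(port_num);
	if (dev == NULL)
		return -ENOMEM;

	/* Block until pipe_interrupt() hands over an accepted connection. */
	fd = port_wait(dev);
	if (fd < 0) {
		port_kern_free(dev);	/* drops the per-device entry again */
		return fd;
	}

	*dev_out = dev;
	return fd;
}

/*
 * On close, a caller would use port_remove_dev(dev) to kill the helper and
 * telnetd processes recorded by port_wait(), then port_kern_free(dev) to
 * release the device entry.
 */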