v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
  4 */
  5
  6#include <linux/completion.h>
  7#include <linux/interrupt.h>
  8#include <linux/list.h>
  9#include <linux/mutex.h>
 10#include <linux/slab.h>
 11#include <linux/workqueue.h>
 12#include <asm/atomic.h>
 13#include <init.h>
 14#include <irq_kern.h>
 15#include <os.h>
 16#include "port.h"
 17
 18struct port_list {
 19	struct list_head list;
 20	atomic_t wait_count;
 21	int has_connection;
 22	struct completion done;
 23	int port;
 24	int fd;
 25	spinlock_t lock;
 26	struct list_head pending;
 27	struct list_head connections;
 28};
 29
 30struct port_dev {
 31	struct port_list *port;
 32	int helper_pid;
 33	int telnetd_pid;
 34};
 35
 36struct connection {
 37	struct list_head list;
 38	int fd;
 39	int helper_pid;
 40	int socket[2];
 41	int telnetd_pid;
 42	struct port_list *port;
 43};
 44
 45static irqreturn_t pipe_interrupt(int irq, void *data)
 46{
 47	struct connection *conn = data;
 48	int fd;
 49
 50	fd = os_rcv_fd(conn->socket[0], &conn->helper_pid);
 51	if (fd < 0) {
 52		if (fd == -EAGAIN)
 53			return IRQ_NONE;
 54
 55		printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n",
 56		       -fd);
 57		os_close_file(conn->fd);
 58	}
 59
 60	list_del(&conn->list);
 61
 62	conn->fd = fd;
 63	list_add(&conn->list, &conn->port->connections);
 64
 65	complete(&conn->port->done);
 66	return IRQ_HANDLED;
 67}
 68
 69#define NO_WAITER_MSG \
 70    "****\n" \
 71    "There are currently no UML consoles waiting for port connections.\n" \
 72    "Either disconnect from one to make it available or activate some more\n" \
 73    "by enabling more consoles in the UML /etc/inittab.\n" \
 74    "****\n"
 75
 76static int port_accept(struct port_list *port)
 77{
 78	struct connection *conn;
 79	int fd, socket[2], pid;
 80
 81	fd = port_connection(port->fd, socket, &pid);
 82	if (fd < 0) {
 83		if (fd != -EAGAIN)
 84			printk(KERN_ERR "port_accept : port_connection "
 85			       "returned %d\n", -fd);
 86		goto out;
 87	}
 88
 89	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
 90	if (conn == NULL) {
 91		printk(KERN_ERR "port_accept : failed to allocate "
 92		       "connection\n");
 93		goto out_close;
 94	}
 95	*conn = ((struct connection)
 96		{ .list 	= LIST_HEAD_INIT(conn->list),
 97		  .fd 		= fd,
 98		  .socket  	= { socket[0], socket[1] },
 99		  .telnetd_pid 	= pid,
100		  .port 	= port });
101
102	if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
103			  IRQF_SHARED, "telnetd", conn) < 0) {
104		printk(KERN_ERR "port_accept : failed to get IRQ for "
105		       "telnetd\n");
106		goto out_free;
107	}
108
109	if (atomic_read(&port->wait_count) == 0) {
110		os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
111		printk(KERN_ERR "No one waiting for port\n");
112	}
113	list_add(&conn->list, &port->pending);
114	return 1;
115
116 out_free:
117	kfree(conn);
118 out_close:
119	os_close_file(fd);
120	os_kill_process(pid, 1);
121 out:
122	return 0;
123}
124
125static DEFINE_MUTEX(ports_mutex);
126static LIST_HEAD(ports);
127
128static void port_work_proc(struct work_struct *unused)
129{
130	struct port_list *port;
131	struct list_head *ele;
132	unsigned long flags;
133
134	local_irq_save(flags);
135	list_for_each(ele, &ports) {
136		port = list_entry(ele, struct port_list, list);
137		if (!port->has_connection)
138			continue;
139
140		while (port_accept(port))
141			;
142		port->has_connection = 0;
143	}
144	local_irq_restore(flags);
145}
146
147DECLARE_WORK(port_work, port_work_proc);
148
149static irqreturn_t port_interrupt(int irq, void *data)
150{
151	struct port_list *port = data;
152
153	port->has_connection = 1;
154	schedule_work(&port_work);
155	return IRQ_HANDLED;
156}
157
158void *port_data(int port_num)
159{
160	struct list_head *ele;
161	struct port_list *port;
162	struct port_dev *dev = NULL;
163	int fd;
164
165	mutex_lock(&ports_mutex);
166	list_for_each(ele, &ports) {
167		port = list_entry(ele, struct port_list, list);
168		if (port->port == port_num)
169			goto found;
170	}
171	port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
172	if (port == NULL) {
173		printk(KERN_ERR "Allocation of port list failed\n");
174		goto out;
175	}
176
177	fd = port_listen_fd(port_num);
178	if (fd < 0) {
179		printk(KERN_ERR "binding to port %d failed, errno = %d\n",
180		       port_num, -fd);
181		goto out_free;
182	}
183
184	if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
185			  IRQF_SHARED, "port", port) < 0) {
186		printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
187		goto out_close;
188	}
189
190	*port = ((struct port_list)
191		{ .list 	 	= LIST_HEAD_INIT(port->list),
192		  .wait_count		= ATOMIC_INIT(0),
193		  .has_connection 	= 0,
194		  .port 	 	= port_num,
195		  .fd  			= fd,
196		  .pending 		= LIST_HEAD_INIT(port->pending),
197		  .connections 		= LIST_HEAD_INIT(port->connections) });
198	spin_lock_init(&port->lock);
199	init_completion(&port->done);
200	list_add(&port->list, &ports);
201
202 found:
203	dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
204	if (dev == NULL) {
205		printk(KERN_ERR "Allocation of port device entry failed\n");
206		goto out;
207	}
208
209	*dev = ((struct port_dev) { .port  		= port,
210				    .helper_pid  	= -1,
211				    .telnetd_pid  	= -1 });
212	goto out;
213
214 out_close:
215	os_close_file(fd);
216 out_free:
217	kfree(port);
218 out:
219	mutex_unlock(&ports_mutex);
220	return dev;
221}
222
223int port_wait(void *data)
224{
225	struct port_dev *dev = data;
226	struct connection *conn;
227	struct port_list *port = dev->port;
228	int fd;
229
230	atomic_inc(&port->wait_count);
231	while (1) {
232		fd = -ERESTARTSYS;
233		if (wait_for_completion_interruptible(&port->done))
234			goto out;
235
236		spin_lock(&port->lock);
237
238		conn = list_entry(port->connections.next, struct connection,
239				  list);
240		list_del(&conn->list);
241		spin_unlock(&port->lock);
242
243		os_shutdown_socket(conn->socket[0], 1, 1);
244		os_close_file(conn->socket[0]);
245		os_shutdown_socket(conn->socket[1], 1, 1);
246		os_close_file(conn->socket[1]);
247
248		/* This is done here because freeing an IRQ can't be done
249		 * within the IRQ handler.  So, pipe_interrupt always ups
250		 * the semaphore regardless of whether it got a successful
251		 * connection.  Then we loop here throwing out failed
252		 * connections until a good one is found.
253		 */
254		um_free_irq(TELNETD_IRQ, conn);
255
256		if (conn->fd >= 0)
257			break;
258		os_close_file(conn->fd);
259		kfree(conn);
260	}
261
262	fd = conn->fd;
263	dev->helper_pid = conn->helper_pid;
264	dev->telnetd_pid = conn->telnetd_pid;
265	kfree(conn);
266 out:
267	atomic_dec(&port->wait_count);
268	return fd;
269}
270
271void port_remove_dev(void *d)
272{
273	struct port_dev *dev = d;
274
275	if (dev->helper_pid != -1)
276		os_kill_process(dev->helper_pid, 0);
277	if (dev->telnetd_pid != -1)
278		os_kill_process(dev->telnetd_pid, 1);
279	dev->helper_pid = -1;
280	dev->telnetd_pid = -1;
281}
282
283void port_kern_free(void *d)
284{
285	struct port_dev *dev = d;
286
287	port_remove_dev(dev);
288	kfree(dev);
289}
290
291static void free_port(void)
292{
293	struct list_head *ele;
294	struct port_list *port;
295
296	list_for_each(ele, &ports) {
297		port = list_entry(ele, struct port_list, list);
298		free_irq_by_fd(port->fd);
299		os_close_file(port->fd);
300	}
301}
302
303__uml_exitcall(free_port);
v3.1
  1/*
  2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
  3 * Licensed under the GPL
  4 */
  5
  6#include "linux/completion.h"
  7#include "linux/interrupt.h"
  8#include "linux/list.h"
  9#include "linux/mutex.h"
 10#include "linux/slab.h"
 11#include "linux/workqueue.h"
 12#include "asm/atomic.h"
 13#include "init.h"
 14#include "irq_kern.h"
 15#include "os.h"
 16#include "port.h"
 17
 18struct port_list {
 19	struct list_head list;
 20	atomic_t wait_count;
 21	int has_connection;
 22	struct completion done;
 23	int port;
 24	int fd;
 25	spinlock_t lock;
 26	struct list_head pending;
 27	struct list_head connections;
 28};
 29
 30struct port_dev {
 31	struct port_list *port;
 32	int helper_pid;
 33	int telnetd_pid;
 34};
 35
 36struct connection {
 37	struct list_head list;
 38	int fd;
 39	int helper_pid;
 40	int socket[2];
 41	int telnetd_pid;
 42	struct port_list *port;
 43};
 44
 45static irqreturn_t pipe_interrupt(int irq, void *data)
 46{
 47	struct connection *conn = data;
 48	int fd;
 49
 50	fd = os_rcv_fd(conn->socket[0], &conn->helper_pid);
 51	if (fd < 0) {
 52		if (fd == -EAGAIN)
 53			return IRQ_NONE;
 54
 55		printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n",
 56		       -fd);
 57		os_close_file(conn->fd);
 58	}
 59
 60	list_del(&conn->list);
 61
 62	conn->fd = fd;
 63	list_add(&conn->list, &conn->port->connections);
 64
 65	complete(&conn->port->done);
 66	return IRQ_HANDLED;
 67}
 68
 69#define NO_WAITER_MSG \
 70    "****\n" \
 71    "There are currently no UML consoles waiting for port connections.\n" \
 72    "Either disconnect from one to make it available or activate some more\n" \
 73    "by enabling more consoles in the UML /etc/inittab.\n" \
 74    "****\n"
 75
 76static int port_accept(struct port_list *port)
 77{
 78	struct connection *conn;
 79	int fd, socket[2], pid;
 80
 81	fd = port_connection(port->fd, socket, &pid);
 82	if (fd < 0) {
 83		if (fd != -EAGAIN)
 84			printk(KERN_ERR "port_accept : port_connection "
 85			       "returned %d\n", -fd);
 86		goto out;
 87	}
 88
 89	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
 90	if (conn == NULL) {
 91		printk(KERN_ERR "port_accept : failed to allocate "
 92		       "connection\n");
 93		goto out_close;
 94	}
 95	*conn = ((struct connection)
 96		{ .list 	= LIST_HEAD_INIT(conn->list),
 97		  .fd 		= fd,
 98		  .socket  	= { socket[0], socket[1] },
 99		  .telnetd_pid 	= pid,
100		  .port 	= port });
101
102	if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
103			  IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
104			  "telnetd", conn)) {
105		printk(KERN_ERR "port_accept : failed to get IRQ for "
106		       "telnetd\n");
107		goto out_free;
108	}
109
110	if (atomic_read(&port->wait_count) == 0) {
111		os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
112		printk(KERN_ERR "No one waiting for port\n");
113	}
114	list_add(&conn->list, &port->pending);
115	return 1;
116
117 out_free:
118	kfree(conn);
119 out_close:
120	os_close_file(fd);
121	os_kill_process(pid, 1);
122 out:
123	return 0;
124}
125
126static DEFINE_MUTEX(ports_mutex);
127static LIST_HEAD(ports);
128
129static void port_work_proc(struct work_struct *unused)
130{
131	struct port_list *port;
132	struct list_head *ele;
133	unsigned long flags;
134
135	local_irq_save(flags);
136	list_for_each(ele, &ports) {
137		port = list_entry(ele, struct port_list, list);
138		if (!port->has_connection)
139			continue;
140
141		reactivate_fd(port->fd, ACCEPT_IRQ);
142		while (port_accept(port))
143			;
144		port->has_connection = 0;
145	}
146	local_irq_restore(flags);
147}
148
149DECLARE_WORK(port_work, port_work_proc);
150
151static irqreturn_t port_interrupt(int irq, void *data)
152{
153	struct port_list *port = data;
154
155	port->has_connection = 1;
156	schedule_work(&port_work);
157	return IRQ_HANDLED;
158}
159
160void *port_data(int port_num)
161{
162	struct list_head *ele;
163	struct port_list *port;
164	struct port_dev *dev = NULL;
165	int fd;
166
167	mutex_lock(&ports_mutex);
168	list_for_each(ele, &ports) {
169		port = list_entry(ele, struct port_list, list);
170		if (port->port == port_num)
171			goto found;
172	}
173	port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
174	if (port == NULL) {
175		printk(KERN_ERR "Allocation of port list failed\n");
176		goto out;
177	}
178
179	fd = port_listen_fd(port_num);
180	if (fd < 0) {
181		printk(KERN_ERR "binding to port %d failed, errno = %d\n",
182		       port_num, -fd);
183		goto out_free;
184	}
185
186	if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
187			  IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
188			  "port", port)) {
189		printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
190		goto out_close;
191	}
192
193	*port = ((struct port_list)
194		{ .list 	 	= LIST_HEAD_INIT(port->list),
195		  .wait_count		= ATOMIC_INIT(0),
196		  .has_connection 	= 0,
197		  .port 	 	= port_num,
198		  .fd  			= fd,
199		  .pending 		= LIST_HEAD_INIT(port->pending),
200		  .connections 		= LIST_HEAD_INIT(port->connections) });
201	spin_lock_init(&port->lock);
202	init_completion(&port->done);
203	list_add(&port->list, &ports);
204
205 found:
206	dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
207	if (dev == NULL) {
208		printk(KERN_ERR "Allocation of port device entry failed\n");
209		goto out;
210	}
211
212	*dev = ((struct port_dev) { .port  		= port,
213				    .helper_pid  	= -1,
214				    .telnetd_pid  	= -1 });
215	goto out;
216
217 out_close:
218	os_close_file(fd);
219 out_free:
220	kfree(port);
221 out:
222	mutex_unlock(&ports_mutex);
223	return dev;
224}
225
226int port_wait(void *data)
227{
228	struct port_dev *dev = data;
229	struct connection *conn;
230	struct port_list *port = dev->port;
231	int fd;
232
233	atomic_inc(&port->wait_count);
234	while (1) {
235		fd = -ERESTARTSYS;
236		if (wait_for_completion_interruptible(&port->done))
237			goto out;
238
239		spin_lock(&port->lock);
240
241		conn = list_entry(port->connections.next, struct connection,
242				  list);
243		list_del(&conn->list);
244		spin_unlock(&port->lock);
245
246		os_shutdown_socket(conn->socket[0], 1, 1);
247		os_close_file(conn->socket[0]);
248		os_shutdown_socket(conn->socket[1], 1, 1);
249		os_close_file(conn->socket[1]);
250
251		/* This is done here because freeing an IRQ can't be done
252		 * within the IRQ handler.  So, pipe_interrupt always ups
253		 * the semaphore regardless of whether it got a successful
254		 * connection.  Then we loop here throwing out failed
255		 * connections until a good one is found.
256		 */
257		free_irq(TELNETD_IRQ, conn);
258
259		if (conn->fd >= 0)
260			break;
261		os_close_file(conn->fd);
262		kfree(conn);
263	}
264
265	fd = conn->fd;
266	dev->helper_pid = conn->helper_pid;
267	dev->telnetd_pid = conn->telnetd_pid;
268	kfree(conn);
269 out:
270	atomic_dec(&port->wait_count);
271	return fd;
272}
273
274void port_remove_dev(void *d)
275{
276	struct port_dev *dev = d;
277
278	if (dev->helper_pid != -1)
279		os_kill_process(dev->helper_pid, 0);
280	if (dev->telnetd_pid != -1)
281		os_kill_process(dev->telnetd_pid, 1);
282	dev->helper_pid = -1;
283	dev->telnetd_pid = -1;
284}
285
286void port_kern_free(void *d)
287{
288	struct port_dev *dev = d;
289
290	port_remove_dev(dev);
291	kfree(dev);
292}
293
294static void free_port(void)
295{
296	struct list_head *ele;
297	struct port_list *port;
298
299	list_for_each(ele, &ports) {
300		port = list_entry(ele, struct port_list, list);
301		free_irq_by_fd(port->fd);
302		os_close_file(port->fd);
303	}
304}
305
306__uml_exitcall(free_port);