// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pty.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <kern_util.h>
#include <init.h>
#include <os.h>
#include <sigio.h>
#include <um_malloc.h>

/*
 * Protected by sigio_lock(), also used by sigio_cleanup, which is an
 * exitcall.
 */
static int write_sigio_pid = -1;
static unsigned long write_sigio_stack;

/*
 * These arrays are initialized before the sigio thread is started, and
 * the descriptors closed after it is killed. So, it can't see them change.
 * On the UML side, they are changed under the sigio_lock.
 */
#define SIGIO_FDS_INIT {-1, -1}

static int write_sigio_fds[2] = SIGIO_FDS_INIT;
static int sigio_private[2] = SIGIO_FDS_INIT;

struct pollfds {
	struct pollfd *poll;
	int size;
	int used;
};

/*
 * Protected by sigio_lock(). Used by the sigio thread, but the UML thread
 * synchronizes with it.
 */
static struct pollfds current_poll;
static struct pollfds next_poll;
static struct pollfds all_sigio_fds;

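/*
 * Helper thread for the SIGIO-on-write workaround. It poll()s the
 * descriptors in current_poll; when one becomes ready it removes that
 * descriptor from the set and writes a byte to write_sigio_fds[1],
 * which notifies the UML side through the interrupt registered on
 * write_sigio_fds[0]. A byte arriving on sigio_private[1] is a request
 * from update_thread() to swap current_poll and next_poll; the byte
 * written back serves as the acknowledgement.
 */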
static int write_sigio_thread(void *unused)
{
	struct pollfds *fds, tmp;
	struct pollfd *p;
	int i, n, respond_fd;
	char c;

	os_set_pdeathsig();
	os_fix_helper_signals();
	fds = &current_poll;
	while (1) {
		n = poll(fds->poll, fds->used, -1);
		if (n < 0) {
			if (errno == EINTR)
				continue;
			printk(UM_KERN_ERR "write_sigio_thread : poll returned "
			       "%d, errno = %d\n", n, errno);
		}
		for (i = 0; i < fds->used; i++) {
			p = &fds->poll[i];
			if (p->revents == 0)
				continue;
			if (p->fd == sigio_private[1]) {
				CATCH_EINTR(n = read(sigio_private[1], &c,
						     sizeof(c)));
				if (n != sizeof(c))
					printk(UM_KERN_ERR
					       "write_sigio_thread : "
					       "read on socket failed, "
					       "err = %d\n", errno);
				tmp = current_poll;
				current_poll = next_poll;
				next_poll = tmp;
				respond_fd = sigio_private[1];
			}
			else {
				respond_fd = write_sigio_fds[1];
				fds->used--;
				memmove(&fds->poll[i], &fds->poll[i + 1],
					(fds->used - i) * sizeof(*fds->poll));
			}

			CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
			if (n != sizeof(c))
				printk(UM_KERN_ERR "write_sigio_thread : "
				       "write on socket failed, err = %d\n",
				       errno);
		}
	}

	return 0;
}

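/*
 * Ensure that *polls can hold at least n entries, reallocating the
 * pollfd array (and preserving its current contents) if necessary.
 */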
static int need_poll(struct pollfds *polls, int n)
{
	struct pollfd *new;

	if (n <= polls->size)
		return 0;

	new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
	if (new == NULL) {
		printk(UM_KERN_ERR "need_poll : failed to allocate new "
		       "pollfds\n");
		return -ENOMEM;
	}

	memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
	kfree(polls->poll);

	polls->poll = new;
	polls->size = n;
	return 0;
}

/*
 * Must be called with sigio_lock held, because it's needed by the marked
 * critical section.
 */
static void update_thread(void)
{
	unsigned long flags;
	int n;
	char c;

	flags = um_set_signals_trace(0);
	CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
	if (n != sizeof(c)) {
		printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
		       errno);
		goto fail;
	}

	CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
	if (n != sizeof(c)) {
		printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
		       errno);
		goto fail;
	}

	um_set_signals_trace(flags);
	return;
 fail:
	/* Critical section start */
	if (write_sigio_pid != -1) {
		os_kill_process(write_sigio_pid, 1);
		free_stack(write_sigio_stack, 0);
	}
	write_sigio_pid = -1;
	close(sigio_private[0]);
	close(sigio_private[1]);
	close(write_sigio_fds[0]);
	close(write_sigio_fds[1]);
	/* Critical section end */
	um_set_signals_trace(flags);
}

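/*
 * Add fd to the set watched by the sigio thread. The descriptor must
 * already have been registered through sigio_broken()/all_sigio_fds.
 * Caller must hold sigio_lock().
 */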
int __add_sigio_fd(int fd)
{
	struct pollfd *p;
	int err, i, n;

	for (i = 0; i < all_sigio_fds.used; i++) {
		if (all_sigio_fds.poll[i].fd == fd)
			break;
	}
	if (i == all_sigio_fds.used)
		return -ENOSPC;

	p = &all_sigio_fds.poll[i];

	for (i = 0; i < current_poll.used; i++) {
		if (current_poll.poll[i].fd == fd)
			return 0;
	}

	n = current_poll.used;
	err = need_poll(&next_poll, n + 1);
	if (err)
		return err;

	memcpy(next_poll.poll, current_poll.poll,
	       current_poll.used * sizeof(struct pollfd));
	next_poll.poll[n] = *p;
	next_poll.used = n + 1;
	update_thread();

	return 0;
}

int add_sigio_fd(int fd)
{
	int err;

	sigio_lock();
	err = __add_sigio_fd(fd);
	sigio_unlock();

	return err;
}

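/*
 * Remove fd from the set watched by the sigio thread.
 * Caller must hold sigio_lock().
 */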
int __ignore_sigio_fd(int fd)
{
	struct pollfd *p;
	int err, i, n = 0;

	/*
	 * This is called from exitcalls elsewhere in UML - if
	 * sigio_cleanup has already run, then update_thread will hang
	 * or fail because the thread is no longer running.
	 */
	if (write_sigio_pid == -1)
		return -EIO;

	for (i = 0; i < current_poll.used; i++) {
		if (current_poll.poll[i].fd == fd)
			break;
	}
	if (i == current_poll.used)
		return -ENOENT;

	err = need_poll(&next_poll, current_poll.used - 1);
	if (err)
		return err;

	for (i = 0; i < current_poll.used; i++) {
		p = &current_poll.poll[i];
		if (p->fd != fd)
			next_poll.poll[n++] = *p;
	}
	next_poll.used = current_poll.used - 1;

	update_thread();

	return 0;
}

int ignore_sigio_fd(int fd)
{
	int err;

	sigio_lock();
	err = __ignore_sigio_fd(fd);
	sigio_unlock();

	return err;
}

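/*
 * Allocate a one-entry pollfd array watching fd for input; this becomes
 * the helper thread's initial poll set.
 */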
static struct pollfd *setup_initial_poll(int fd)
{
	struct pollfd *p;

	p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
	if (p == NULL) {
		printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
		       "poll\n");
		return NULL;
	}
	*p = ((struct pollfd) { .fd = fd,
				.events = POLLIN,
				.revents = 0 });
	return p;
}

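/*
 * Set up the SIGIO-on-write workaround: create the notification and
 * control pipes, register write_sigio_fds[0] as an interrupt source,
 * and start the helper thread. Safe to call repeatedly; the setup is
 * only performed once.
 */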
static void write_sigio_workaround(void)
{
	struct pollfd *p;
	int err;
	int l_write_sigio_fds[2];
	int l_sigio_private[2];
	int l_write_sigio_pid;

	/*
	 * This is called many times during boot; usually the thread is
	 * already running and we must simply bail out.
	 */
	sigio_lock();
	l_write_sigio_pid = write_sigio_pid;
	sigio_unlock();

	if (l_write_sigio_pid != -1)
		return;

	err = os_pipe(l_write_sigio_fds, 1, 1);
	if (err < 0) {
		printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
		       "err = %d\n", -err);
		return;
	}
	err = os_pipe(l_sigio_private, 1, 1);
	if (err < 0) {
		printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
		       "err = %d\n", -err);
		goto out_close1;
	}

	p = setup_initial_poll(l_sigio_private[1]);
	if (!p)
		goto out_close2;

	sigio_lock();

	/*
	 * Did we race? Don't try to optimize this, please, it's not so likely
	 * to happen, and no more than once at the boot.
	 */
	if (write_sigio_pid != -1)
		goto out_free;

	current_poll = ((struct pollfds) { .poll = p,
					   .used = 1,
					   .size = 1 });

	if (write_sigio_irq(l_write_sigio_fds[0]))
		goto out_clear_poll;

	memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
	memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));

	write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
					    CLONE_FILES | CLONE_VM,
					    &write_sigio_stack);

	if (write_sigio_pid < 0)
		goto out_clear;

	sigio_unlock();
	return;

out_clear:
	write_sigio_pid = -1;
	write_sigio_fds[0] = -1;
	write_sigio_fds[1] = -1;
	sigio_private[0] = -1;
	sigio_private[1] = -1;
out_clear_poll:
	current_poll = ((struct pollfds) { .poll = NULL,
					   .size = 0,
					   .used = 0 });
out_free:
	sigio_unlock();
	kfree(p);
out_close2:
	close(l_sigio_private[0]);
	close(l_sigio_private[1]);
out_close1:
	close(l_write_sigio_fds[0]);
	close(l_write_sigio_fds[1]);
}

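/*
 * Register fd with the workaround machinery: make sure the helper
 * thread is running and remember the descriptor in all_sigio_fds so
 * that __add_sigio_fd() can later move it into the live poll set.
 */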
void sigio_broken(int fd)
{
	int err;

	write_sigio_workaround();

	sigio_lock();
	err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
	if (err) {
		printk(UM_KERN_ERR "maybe_sigio_broken - failed to add pollfd "
		       "for descriptor %d\n", fd);
		goto out;
	}

	all_sigio_fds.poll[all_sigio_fds.used++] =
		((struct pollfd) { .fd = fd,
				   .events = POLLIN,
				   .revents = 0 });
out:
	sigio_unlock();
}

/* Changed during early boot */
static int pty_output_sigio;

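/*
 * Decide whether fd needs the workaround: non-tty descriptors are
 * assumed to deliver SIGIO natively, and ttys need it only if the
 * boot-time test showed that output SIGIO is broken on this host.
 */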
void maybe_sigio_broken(int fd)
{
	if (!isatty(fd))
		return;

	if (pty_output_sigio)
		return;

	sigio_broken(fd);
}

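/* Exitcall: stop the helper thread and release its stack. */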
static void sigio_cleanup(void)
{
	if (write_sigio_pid == -1)
		return;

	os_kill_process(write_sigio_pid, 1);
	free_stack(write_sigio_stack, 0);
	write_sigio_pid = -1;
}

__uml_exitcall(sigio_cleanup);

/* Used as a flag during SIGIO testing early in boot */
static int got_sigio;

static void __init handler(int sig)
{
	got_sigio = 1;
}

struct openpty_arg {
	int master;
	int slave;
	int err;
};

static void openpty_cb(void *arg)
{
	struct openpty_arg *info = arg;

	info->err = 0;
	if (openpty(&info->master, &info->slave, NULL, NULL, NULL))
		info->err = -errno;
}

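/*
 * Put the pty pair into the mode the test needs: the master
 * non-blocking with O_ASYNC (SIGIO) delivered to this process, the
 * slave just non-blocking.
 */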
static int async_pty(int master, int slave)
{
	int flags;

	flags = fcntl(master, F_GETFL);
	if (flags < 0)
		return -errno;

	if ((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
	    (fcntl(master, F_SETOWN, os_getpid()) < 0))
		return -errno;

	if ((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
		return -errno;

	return 0;
}

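/*
 * Boot-time probe: allocate a pty, install a temporary SIGIO handler,
 * run proc(master, slave) to exercise one behaviour, and restore the
 * old handler. proc checks whether SIGIO arrived (got_sigio) and sets
 * pty_output_sigio accordingly.
 */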
static void __init check_one_sigio(void (*proc)(int, int))
{
	struct sigaction old, new;
	struct openpty_arg pty = { .master = -1, .slave = -1 };
	int master, slave, err;

	initial_thread_cb(openpty_cb, &pty);
	if (pty.err) {
		printk(UM_KERN_ERR "check_one_sigio failed, errno = %d\n",
		       -pty.err);
		return;
	}

	master = pty.master;
	slave = pty.slave;

	if ((master == -1) || (slave == -1)) {
		printk(UM_KERN_ERR "check_one_sigio failed to allocate a "
		       "pty\n");
		return;
	}

	/* Not now, but complain so we know where we failed. */
	err = raw(master);
	if (err < 0) {
		printk(UM_KERN_ERR "check_one_sigio : raw failed, errno = %d\n",
		       -err);
		return;
	}

	err = async_pty(master, slave);
	if (err < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigio_async failed, "
		       "err = %d\n", -err);
		return;
	}

	if (sigaction(SIGIO, NULL, &old) < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigaction 1 failed, "
		       "errno = %d\n", errno);
		return;
	}

	new = old;
	new.sa_handler = handler;
	if (sigaction(SIGIO, &new, NULL) < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigaction 2 failed, "
		       "errno = %d\n", errno);
		return;
	}

	got_sigio = 0;
	(*proc)(master, slave);

	close(master);
	close(slave);

	if (sigaction(SIGIO, &old, NULL) < 0)
		printk(UM_KERN_ERR "check_one_sigio : sigaction 3 failed, "
		       "errno = %d\n", errno);
}

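/*
 * Test whether host ptys raise SIGIO for output: fill the master until
 * writes block, then drain the slave and check whether SIGIO arrived.
 * If it did, pty_output_sigio is set and the write workaround is not
 * needed.
 */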
static void tty_output(int master, int slave)
{
	int n;
	char buf[512];

	printk(UM_KERN_INFO "Checking that host ptys support output SIGIO...");

	memset(buf, 0, sizeof(buf));

	while (write(master, buf, sizeof(buf)) > 0) ;
	if (errno != EAGAIN)
		printk(UM_KERN_ERR "tty_output : write failed, errno = %d\n",
		       errno);
	while (((n = read(slave, buf, sizeof(buf))) > 0) &&
	       !({ barrier(); got_sigio; }))
		;

	if (got_sigio) {
		printk(UM_KERN_CONT "Yes\n");
		pty_output_sigio = 1;
	} else if (n == -EAGAIN)
		printk(UM_KERN_CONT "No, enabling workaround\n");
	else
		printk(UM_KERN_CONT "tty_output : read failed, err = %d\n", n);
}

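/*
 * Run the pty SIGIO test if the host has any ptys at all; otherwise
 * skip it with a warning.
 */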
static void __init check_sigio(void)
{
	if ((access("/dev/ptmx", R_OK) < 0) &&
	    (access("/dev/ptyp0", R_OK) < 0)) {
		printk(UM_KERN_WARNING "No pseudo-terminals available - "
		       "skipping pty SIGIO check\n");
		return;
	}
	check_one_sigio(tty_output);
}

/* Here because it only does the SIGIO testing for now */
void __init os_check_bugs(void)
{
	check_sigio();
}