/*
 * Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pty.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include "kern_util.h"
#include "init.h"
#include "os.h"
#include "sigio.h"
#include "um_malloc.h"

/*
 * Protected by sigio_lock(), also used by sigio_cleanup, which is an
 * exitcall.
 */
static int write_sigio_pid = -1;
static unsigned long write_sigio_stack;

/*
 * These arrays are initialized before the sigio thread is started, and
 * the descriptors closed after it is killed. So, it can't see them change.
 * On the UML side, they are changed under the sigio_lock.
 */
#define SIGIO_FDS_INIT {-1, -1}

static int write_sigio_fds[2] = SIGIO_FDS_INIT;
static int sigio_private[2] = SIGIO_FDS_INIT;

struct pollfds {
	struct pollfd *poll;
	int size;
	int used;
};

/*
 * Protected by sigio_lock(). Used by the sigio thread, but the UML thread
 * synchronizes with it.
 */
static struct pollfds current_poll;
static struct pollfds next_poll;
static struct pollfds all_sigio_fds;

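/*
 * Body of the helper thread.  It polls the descriptors in current_poll
 * forever.  A byte arriving on sigio_private[1] is a request from the UML
 * side to switch to the poll set staged in next_poll; the two sets are
 * swapped and the byte is echoed back as the acknowledgement.  Activity on
 * any other descriptor is reported by writing a byte to write_sigio_fds[1],
 * whose read end the UML side handed to write_sigio_irq(), and that
 * descriptor is dropped from the poll set until it is re-added via
 * add_sigio_fd().
 */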
static int write_sigio_thread(void *unused)
{
	struct pollfds *fds, tmp;
	struct pollfd *p;
	int i, n, respond_fd;
	char c;

	signal(SIGWINCH, SIG_IGN);
	fds = &current_poll;
	while (1) {
		n = poll(fds->poll, fds->used, -1);
		if (n < 0) {
			if (errno == EINTR)
				continue;
			printk(UM_KERN_ERR "write_sigio_thread : poll returned "
			       "%d, errno = %d\n", n, errno);
		}
		for (i = 0; i < fds->used; i++) {
			p = &fds->poll[i];
			if (p->revents == 0)
				continue;
			if (p->fd == sigio_private[1]) {
				CATCH_EINTR(n = read(sigio_private[1], &c,
						     sizeof(c)));
				if (n != sizeof(c))
					printk(UM_KERN_ERR
					       "write_sigio_thread : "
					       "read on socket failed, "
					       "err = %d\n", errno);
				tmp = current_poll;
				current_poll = next_poll;
				next_poll = tmp;
				respond_fd = sigio_private[1];
			}
			else {
				respond_fd = write_sigio_fds[1];
				fds->used--;
				memmove(&fds->poll[i], &fds->poll[i + 1],
					(fds->used - i) * sizeof(*fds->poll));
			}

			CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
			if (n != sizeof(c))
				printk(UM_KERN_ERR "write_sigio_thread : "
				       "write on socket failed, err = %d\n",
				       errno);
		}
	}

	return 0;
}

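/*
 * Make sure polls->poll has room for at least n entries, reallocating and
 * copying over the entries currently in use if it does not.  Returns 0 on
 * success and -ENOMEM if the larger array cannot be allocated.
 */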
static int need_poll(struct pollfds *polls, int n)
{
	struct pollfd *new;

	if (n <= polls->size)
		return 0;

	new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
	if (new == NULL) {
		printk(UM_KERN_ERR "need_poll : failed to allocate new "
		       "pollfds\n");
		return -ENOMEM;
	}

	memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
	kfree(polls->poll);

	polls->poll = new;
	polls->size = n;
	return 0;
}

/*
 * Ask the helper thread to switch to the poll set staged in next_poll and
 * wait for it to acknowledge the swap.  Must be called with sigio_lock held,
 * because it's needed by the marked critical section.
 */
static void update_thread(void)
{
	unsigned long flags;
	int n;
	char c;

	flags = set_signals(0);
	CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
	if (n != sizeof(c)) {
		printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
		       errno);
		goto fail;
	}

	CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
	if (n != sizeof(c)) {
		printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
		       errno);
		goto fail;
	}

	set_signals(flags);
	return;
 fail:
	/* Critical section start */
	if (write_sigio_pid != -1) {
		os_kill_process(write_sigio_pid, 1);
		free_stack(write_sigio_stack, 0);
	}
	write_sigio_pid = -1;
	close(sigio_private[0]);
	close(sigio_private[1]);
	close(write_sigio_fds[0]);
	close(write_sigio_fds[1]);
	/* Critical section end */
	set_signals(flags);
}

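/*
 * Start watching a descriptor that was previously registered in
 * all_sigio_fds by sigio_broken().  The new poll set is built in next_poll
 * and handed over to the helper thread with update_thread().  Adding a
 * descriptor that is unknown, or already being watched, is a no-op.
 */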
int add_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n;

	sigio_lock();
	for (i = 0; i < all_sigio_fds.used; i++) {
		if (all_sigio_fds.poll[i].fd == fd)
			break;
	}
	if (i == all_sigio_fds.used)
		goto out;

	p = &all_sigio_fds.poll[i];

	for (i = 0; i < current_poll.used; i++) {
		if (current_poll.poll[i].fd == fd)
			goto out;
	}

	n = current_poll.used;
	err = need_poll(&next_poll, n + 1);
	if (err)
		goto out;

	memcpy(next_poll.poll, current_poll.poll,
	       current_poll.used * sizeof(struct pollfd));
	next_poll.poll[n] = *p;
	next_poll.used = n + 1;
	update_thread();
 out:
	sigio_unlock();
	return err;
}

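/*
 * Stop watching a descriptor.  The remaining entries are copied into
 * next_poll and the helper thread is told to switch to that set.  Returns
 * -EIO if the helper thread has already been cleaned up.
 */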
int ignore_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n = 0;

	/*
	 * This is called from exitcalls elsewhere in UML - if
	 * sigio_cleanup has already run, then update_thread will hang
	 * or fail because the thread is no longer running.
	 */
	if (write_sigio_pid == -1)
		return -EIO;

	sigio_lock();
	for (i = 0; i < current_poll.used; i++) {
		if (current_poll.poll[i].fd == fd)
			break;
	}
	if (i == current_poll.used)
		goto out;

	err = need_poll(&next_poll, current_poll.used - 1);
	if (err)
		goto out;

	for (i = 0; i < current_poll.used; i++) {
		p = &current_poll.poll[i];
		if (p->fd != fd)
			next_poll.poll[n++] = *p;
	}
	next_poll.used = current_poll.used - 1;

	update_thread();
 out:
	sigio_unlock();
	return err;
}

static struct pollfd *setup_initial_poll(int fd)
{
	struct pollfd *p;

	p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
	if (p == NULL) {
		printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
		       "poll\n");
		return NULL;
	}
	*p = ((struct pollfd) { .fd	= fd,
				.events	= POLLIN,
				.revents = 0 });
	return p;
}

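/*
 * Start the write-SIGIO emulation if it is not already running: create the
 * two pipe pairs, register the read end of write_sigio_fds with
 * write_sigio_irq(), and clone the helper thread with write_sigio_thread()
 * as its entry point.  Everything is set up outside the lock and then
 * installed under sigio_lock(), so a racing caller simply finds the thread
 * already started and backs out.
 */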
static void write_sigio_workaround(void)
{
	struct pollfd *p;
	int err;
	int l_write_sigio_fds[2];
	int l_sigio_private[2];
	int l_write_sigio_pid;

	/*
	 * We call this *tons* of times - and most of the time we must just
	 * bail out early because the thread is already running.
	 */
	sigio_lock();
	l_write_sigio_pid = write_sigio_pid;
	sigio_unlock();

	if (l_write_sigio_pid != -1)
		return;

	err = os_pipe(l_write_sigio_fds, 1, 1);
	if (err < 0) {
		printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
		       "err = %d\n", -err);
		return;
	}
	err = os_pipe(l_sigio_private, 1, 1);
	if (err < 0) {
		printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
		       "err = %d\n", -err);
		goto out_close1;
	}

	p = setup_initial_poll(l_sigio_private[1]);
	if (!p)
		goto out_close2;

	sigio_lock();

	/*
	 * Did we race? Don't try to optimize this, please, it's not so likely
	 * to happen, and no more than once at boot.
	 */
	if (write_sigio_pid != -1)
		goto out_free;

	current_poll = ((struct pollfds) { .poll = p,
					   .used = 1,
					   .size = 1 });

	if (write_sigio_irq(l_write_sigio_fds[0]))
		goto out_clear_poll;

	memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
	memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));

	write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
					    CLONE_FILES | CLONE_VM,
					    &write_sigio_stack);

	if (write_sigio_pid < 0)
		goto out_clear;

	sigio_unlock();
	return;

out_clear:
	write_sigio_pid = -1;
	write_sigio_fds[0] = -1;
	write_sigio_fds[1] = -1;
	sigio_private[0] = -1;
	sigio_private[1] = -1;
out_clear_poll:
	current_poll = ((struct pollfds) { .poll = NULL,
					   .size = 0,
					   .used = 0 });
out_free:
	sigio_unlock();
	kfree(p);
out_close2:
	close(l_sigio_private[0]);
	close(l_sigio_private[1]);
out_close1:
	close(l_write_sigio_fds[0]);
	close(l_write_sigio_fds[1]);
}

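/*
 * Record that SIGIO does not work for this descriptor and fall back to the
 * poll-based emulation: make sure the helper thread is running and remember
 * the descriptor in all_sigio_fds so that add_sigio_fd() can later hand it
 * to the thread.
 */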
void sigio_broken(int fd, int read)
{
	int err;

	write_sigio_workaround();

	sigio_lock();
	err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
	if (err) {
		printk(UM_KERN_ERR "sigio_broken - failed to add pollfd "
		       "for descriptor %d\n", fd);
		goto out;
	}

	all_sigio_fds.poll[all_sigio_fds.used++] =
		((struct pollfd) { .fd		= fd,
				   .events	= read ? POLLIN : POLLOUT,
				   .revents	= 0 });
out:
	sigio_unlock();
}

/* Changed during early boot */
static int pty_output_sigio;
static int pty_close_sigio;

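/*
 * Decide whether a tty descriptor needs the poll-based workaround.  Read
 * descriptors need it when the host's ptys do not raise SIGIO on close
 * (pty_close_sigio clear), write descriptors when they do not raise SIGIO
 * once output becomes possible again (pty_output_sigio clear).  Non-tty
 * descriptors are left alone.
 */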
void maybe_sigio_broken(int fd, int read)
{
	if (!isatty(fd))
		return;

	if ((read || pty_output_sigio) && (!read || pty_close_sigio))
		return;

	sigio_broken(fd, read);
}

static void sigio_cleanup(void)
{
	if (write_sigio_pid == -1)
		return;

	os_kill_process(write_sigio_pid, 1);
	free_stack(write_sigio_stack, 0);
	write_sigio_pid = -1;
}

__uml_exitcall(sigio_cleanup);

/* Used as a flag during SIGIO testing early in boot */
static int got_sigio;

static void __init handler(int sig)
{
	got_sigio = 1;
}

struct openpty_arg {
	int master;
	int slave;
	int err;
};

static void openpty_cb(void *arg)
{
	struct openpty_arg *info = arg;

	info->err = 0;
	if (openpty(&info->master, &info->slave, NULL, NULL, NULL))
		info->err = -errno;
}

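/*
 * Put the pty pair into the mode the SIGIO test needs: the master is made
 * non-blocking and asynchronous, with this process as the SIGIO owner, and
 * the slave is made non-blocking.  Returns 0 or -errno from the failing
 * fcntl().
 */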
static int async_pty(int master, int slave)
{
	int flags;

	flags = fcntl(master, F_GETFL);
	if (flags < 0)
		return -errno;

	if ((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
	    (fcntl(master, F_SETOWN, os_getpid()) < 0))
		return -errno;

	if ((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
		return -errno;

	return 0;
}

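/*
 * Common harness for the boot-time SIGIO probes: open a pty pair on the
 * initial thread, put it into raw, async mode, temporarily install a SIGIO
 * handler that sets got_sigio, run the supplied test on the pair, then
 * restore the old handler.
 */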
static void __init check_one_sigio(void (*proc)(int, int))
{
	struct sigaction old, new;
	struct openpty_arg pty = { .master = -1, .slave = -1 };
	int master, slave, err;

	initial_thread_cb(openpty_cb, &pty);
	if (pty.err) {
		printk(UM_KERN_ERR "check_one_sigio failed, errno = %d\n",
		       -pty.err);
		return;
	}

	master = pty.master;
	slave = pty.slave;

	if ((master == -1) || (slave == -1)) {
		printk(UM_KERN_ERR "check_one_sigio failed to allocate a "
		       "pty\n");
		return;
	}

	/* Not now, but complain so we know where we failed. */
	err = raw(master);
	if (err < 0) {
		printk(UM_KERN_ERR "check_one_sigio : raw failed, errno = %d\n",
		       -err);
		return;
	}

	err = async_pty(master, slave);
	if (err < 0) {
		printk(UM_KERN_ERR "check_one_sigio : async_pty failed, "
		       "err = %d\n", -err);
		return;
	}

	if (sigaction(SIGIO, NULL, &old) < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigaction 1 failed, "
		       "errno = %d\n", errno);
		return;
	}

	new = old;
	new.sa_handler = handler;
	if (sigaction(SIGIO, &new, NULL) < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigaction 2 failed, "
		       "errno = %d\n", errno);
		return;
	}

	got_sigio = 0;
	(*proc)(master, slave);

	close(master);
	close(slave);

	if (sigaction(SIGIO, &old, NULL) < 0)
		printk(UM_KERN_ERR "check_one_sigio : sigaction 3 failed, "
		       "errno = %d\n", errno);
}

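/*
 * Probe for SIGIO on output: fill the master's buffer until write() would
 * block, then drain the slave and see whether the write side becoming
 * writable again raised SIGIO.  Sets pty_output_sigio if it did.
 */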
static void tty_output(int master, int slave)
{
	int n;
	char buf[512];

	printk(UM_KERN_INFO "Checking that host ptys support output SIGIO...");

	memset(buf, 0, sizeof(buf));

	while (write(master, buf, sizeof(buf)) > 0) ;
	if (errno != EAGAIN)
		printk(UM_KERN_ERR "tty_output : write failed, errno = %d\n",
		       errno);
	while (((n = read(slave, buf, sizeof(buf))) > 0) &&
	       !({ barrier(); got_sigio; }))
		;

	if (got_sigio) {
		printk(UM_KERN_CONT "Yes\n");
		pty_output_sigio = 1;
	} else if ((n < 0) && (errno == EAGAIN))
		printk(UM_KERN_CONT "No, enabling workaround\n");
	else
		printk(UM_KERN_CONT "tty_output : read failed, err = %d\n", n);
}

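/*
 * Probe for SIGIO on close: closing the slave should raise SIGIO on the
 * master.  Sets pty_close_sigio if the signal arrived.
 */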
static void tty_close(int master, int slave)
{
	printk(UM_KERN_INFO "Checking that host ptys support SIGIO on "
	       "close...");

	close(slave);
	if (got_sigio) {
		printk(UM_KERN_CONT "Yes\n");
		pty_close_sigio = 1;
	} else
		printk(UM_KERN_CONT "No, enabling workaround\n");
}

static void __init check_sigio(void)
{
	if ((access("/dev/ptmx", R_OK) < 0) &&
	    (access("/dev/ptyp0", R_OK) < 0)) {
		printk(UM_KERN_WARNING "No pseudo-terminals available - "
		       "skipping pty SIGIO check\n");
		return;
	}
	check_one_sigio(tty_output);
	check_one_sigio(tty_close);
}

/* Here because it only does the SIGIO testing for now */
void __init os_check_bugs(void)
{
	check_sigio();
}