/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>

#include <asm/uaccess.h>

/*
 * Estimate expected accuracy in ns from a timespec.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)
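
/*
 * Worked example of the rule above (illustrative numbers): with
 * divfactor == 1000, a 2 s timeout yields 2 ms of slack; a "nice" task
 * (divfactor == 200) gets 10 ms.  The tv_sec guard below means timeouts
 * longer than 100 s (20 s for "nice" tasks) are simply capped at
 * MAX_SLACK.
 */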

static long __estimate_accuracy(struct timespec *tv)
{
        long slack;
        int divfactor = 1000;

        if (tv->tv_sec < 0)
                return 0;

        if (task_nice(current) > 0)
                divfactor = divfactor / 5;

        if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
                return MAX_SLACK;

        slack = tv->tv_nsec / divfactor;
        slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

        if (slack > MAX_SLACK)
                return MAX_SLACK;

        return slack;
}

u64 select_estimate_accuracy(struct timespec *tv)
{
        u64 ret;
        struct timespec now;

        /*
         * Realtime tasks get a slack of 0 for obvious reasons.
         */

        if (rt_task(current))
                return 0;

        ktime_get_ts(&now);
        now = timespec_sub(*tv, now);
        ret = __estimate_accuracy(&now);
        if (ret < current->timer_slack_ns)
                return current->timer_slack_ns;
        return ret;
}


struct poll_table_page {
        struct poll_table_page * next;
        struct poll_table_entry * entry;
        struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
        ((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
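
/*
 * Illustrative arithmetic (sizes are assumptions, not taken from this
 * file): each poll_table_page occupies exactly one page, with 'entry'
 * pointing at the first free slot.  Assuming a 4 KiB page, the 16-byte
 * header above and, say, a 56-byte poll_table_entry, about
 * (4096 - 16) / 56 = 72 entries fit before POLL_TABLE_FULL() reports
 * that one more entry would run past the end of the page.
 */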

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: this code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work. poll_wait() is an inline function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
                       poll_table *p);
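
/*
 * For context, a minimal sketch of the other side of this contract: a
 * driver's ->poll method calls poll_wait() with its wait queue, which
 * lands in __pollwait() below.  Names are hypothetical, not from this
 * file:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(mydev_wq);
 *
 *	static unsigned int mydev_poll(struct file *file, poll_table *wait)
 *	{
 *		unsigned int mask = 0;
 *
 *		poll_wait(file, &mydev_wq, wait);  // registers via __pollwait()
 *		if (mydev_data_ready())            // hypothetical readiness test
 *			mask |= POLLIN | POLLRDNORM;
 *		return mask;
 *	}
 */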

void poll_initwait(struct poll_wqueues *pwq)
{
        init_poll_funcptr(&pwq->pt, __pollwait);
        pwq->polling_task = current;
        pwq->triggered = 0;
        pwq->error = 0;
        pwq->table = NULL;
        pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
        remove_wait_queue(entry->wait_address, &entry->wait);
        fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
        struct poll_table_page * p = pwq->table;
        int i;
        for (i = 0; i < pwq->inline_index; i++)
                free_poll_entry(pwq->inline_entries + i);
        while (p) {
                struct poll_table_entry * entry;
                struct poll_table_page *old;

                entry = p->entry;
                do {
                        entry--;
                        free_poll_entry(entry);
                } while (entry > p->entries);
                old = p;
                p = p->next;
                free_page((unsigned long) old);
        }
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
        struct poll_table_page *table = p->table;

        if (p->inline_index < N_INLINE_POLL_ENTRIES)
                return p->inline_entries + p->inline_index++;

        if (!table || POLL_TABLE_FULL(table)) {
                struct poll_table_page *new_table;

                new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
                if (!new_table) {
                        p->error = -ENOMEM;
                        return NULL;
                }
                new_table->entry = new_table->entries;
                new_table->next = table;
                p->table = new_table;
                table = new_table;
        }

        return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct poll_wqueues *pwq = wait->private;
        DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

        /*
         * Although this function is called under waitqueue lock, LOCK
         * doesn't imply write barrier and the users expect write
         * barrier semantics on wakeup functions. The following
         * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
         * and is paired with smp_store_mb() in poll_schedule_timeout.
         */
        smp_wmb();
        pwq->triggered = 1;

        /*
         * Perform the default wake up operation using a dummy
         * waitqueue.
         *
         * TODO: This is hacky but there currently is no interface to
         * pass in @sync. @sync is scheduled to be removed and once
         * that happens, wake_up_process() can be used directly.
         */
        return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct poll_table_entry *entry;

        entry = container_of(wait, struct poll_table_entry, wait);
        if (key && !((unsigned long)key & entry->key))
                return 0;
        return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
                       poll_table *p)
{
        struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
        struct poll_table_entry *entry = poll_get_entry(pwq);
        if (!entry)
                return;
        entry->filp = get_file(filp);
        entry->wait_address = wait_address;
        entry->key = p->_key;
        init_waitqueue_func_entry(&entry->wait, pollwake);
        entry->wait.private = pwq;
        add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
                          ktime_t *expires, unsigned long slack)
{
        int rc = -EINTR;

        set_current_state(state);
        if (!pwq->triggered)
                rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
        __set_current_state(TASK_RUNNING);

        /*
         * Prepare for the next iteration.
         *
         * The following smp_store_mb() serves two purposes. First, it's
         * the counterpart rmb of the wmb in pollwake() such that data
         * written before wake up is always visible after wake up.
         * Second, the full barrier guarantees that triggered clearing
         * doesn't pass event check of the next iteration. Note that
         * this problem doesn't exist for the first iteration as
         * add_wait_queue() has full barrier semantics.
         */
        smp_store_mb(pwq->triggered, 0);

        return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
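
/*
 * Illustrative timeline of the barrier pairing described above:
 *
 *	waker (pollwake)		poller (poll_schedule_timeout)
 *	----------------		------------------------------
 *	write event data
 *	smp_wmb()
 *	pwq->triggered = 1		sees triggered == 1, skips sleep
 *					smp_store_mb(pwq->triggered, 0)
 *					re-checks events: data is visible
 *
 * Without the pairing, the poller could clear 'triggered' and re-check
 * events before the waker's data became visible, and sleep through a
 * wakeup.
 */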

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to: pointer to timespec variable for the final timeout
 * @sec: seconds (from user space)
 * @nsec: nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
        struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

        if (!timespec_valid(&ts))
                return -EINVAL;

        /* Optimize for the zero timeout value here */
        if (!sec && !nsec) {
                to->tv_sec = to->tv_nsec = 0;
        } else {
                ktime_get_ts(to);
                *to = timespec_add_safe(*to, ts);
        }
        return 0;
}
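
/*
 * Usage sketch (illustrative): a caller wanting "5 seconds from now"
 * passes the relative value and gets back an absolute monotonic
 * deadline:
 *
 *	struct timespec end_time;
 *
 *	if (poll_select_set_timeout(&end_time, 5, 0))
 *		return -EINVAL;	   // only if sec/nsec were not normalized
 *	// end_time now holds CLOCK_MONOTONIC "now" + 5 s
 */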

static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
                                      int timeval, int ret)
{
        struct timespec rts;
        struct timeval rtv;

        if (!p)
                return ret;

        if (current->personality & STICKY_TIMEOUTS)
                goto sticky;

        /* No update for zero timeout */
        if (!end_time->tv_sec && !end_time->tv_nsec)
                return ret;

        ktime_get_ts(&rts);
        rts = timespec_sub(*end_time, rts);
        if (rts.tv_sec < 0)
                rts.tv_sec = rts.tv_nsec = 0;

        if (timeval) {
                if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
                        memset(&rtv, 0, sizeof(rtv));
                rtv.tv_sec = rts.tv_sec;
                rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

                if (!copy_to_user(p, &rtv, sizeof(rtv)))
                        return ret;

        } else if (!copy_to_user(p, &rts, sizeof(rts)))
                return ret;

        /*
         * If an application puts its timeval in read-only memory, we
         * don't want the Linux-specific update to the timeval to
         * cause a fault after the select has completed
         * successfully. However, because we're not updating the
         * timeval, we can't restart the system call.
         */

sticky:
        if (ret == -ERESTARTNOHAND)
                ret = -EINTR;
        return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
        unsigned long *open_fds;
        unsigned long set;
        int max;
        struct fdtable *fdt;

        /* handle last incomplete long-word first */
        set = ~(~0UL << (n & (BITS_PER_LONG-1)));
        n /= BITS_PER_LONG;
        fdt = files_fdtable(current->files);
        open_fds = fdt->open_fds + n;
        max = 0;
        if (set) {
                set &= BITS(fds, n);
                if (set) {
                        if (!(set & ~*open_fds))
                                goto get_max;
                        return -EBADF;
                }
        }
        while (n) {
                open_fds--;
                n--;
                set = BITS(fds, n);
                if (!set)
                        continue;
                if (set & ~*open_fds)
                        return -EBADF;
                if (max)
                        continue;
get_max:
                do {
                        max++;
                        set >>= 1;
                } while (set);
                max += n * BITS_PER_LONG;
        }

        return max;
}
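
/*
 * Worked example (illustrative, assuming 64-bit longs): for n == 70,
 * the partial last word masks bits 64..69, so set starts as 0x3f.  If
 * the highest descriptor requested in in/out/ex is fd 66 (bit 2 of
 * word 1), the do/while shifts set right three times, giving max = 3,
 * and the result is 3 + 1 * BITS_PER_LONG = 67, i.e. highest fd + 1.
 */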

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
                                unsigned long out, unsigned long bit,
                                unsigned int ll_flag)
{
        wait->_key = POLLEX_SET | ll_flag;
        if (in & bit)
                wait->_key |= POLLIN_SET;
        if (out & bit)
                wait->_key |= POLLOUT_SET;
}

int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
        ktime_t expire, *to = NULL;
        struct poll_wqueues table;
        poll_table *wait;
        int retval, i, timed_out = 0;
        u64 slack = 0;
        unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
        unsigned long busy_end = 0;

        rcu_read_lock();
        retval = max_select_fd(n, fds);
        rcu_read_unlock();

        if (retval < 0)
                return retval;
        n = retval;

        poll_initwait(&table);
        wait = &table.pt;
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
                wait->_qproc = NULL;
                timed_out = 1;
        }

        if (end_time && !timed_out)
                slack = select_estimate_accuracy(end_time);

        retval = 0;
        for (;;) {
                unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
                bool can_busy_loop = false;

                inp = fds->in; outp = fds->out; exp = fds->ex;
                rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

                for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
                        unsigned long in, out, ex, all_bits, bit = 1, mask, j;
                        unsigned long res_in = 0, res_out = 0, res_ex = 0;

                        in = *inp++; out = *outp++; ex = *exp++;
                        all_bits = in | out | ex;
                        if (all_bits == 0) {
                                i += BITS_PER_LONG;
                                continue;
                        }

                        for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
                                struct fd f;
                                if (i >= n)
                                        break;
                                if (!(bit & all_bits))
                                        continue;
                                f = fdget(i);
                                if (f.file) {
                                        const struct file_operations *f_op;
                                        f_op = f.file->f_op;
                                        mask = DEFAULT_POLLMASK;
                                        if (f_op->poll) {
                                                wait_key_set(wait, in, out,
                                                             bit, busy_flag);
                                                mask = (*f_op->poll)(f.file, wait);
                                        }
                                        fdput(f);
                                        if ((mask & POLLIN_SET) && (in & bit)) {
                                                res_in |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        if ((mask & POLLOUT_SET) && (out & bit)) {
                                                res_out |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        if ((mask & POLLEX_SET) && (ex & bit)) {
                                                res_ex |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        /* got something, stop busy polling */
                                        if (retval) {
                                                can_busy_loop = false;
                                                busy_flag = 0;

                                        /*
                                         * only remember a returned
                                         * POLL_BUSY_LOOP if we asked for it
                                         */
                                        } else if (busy_flag & mask)
                                                can_busy_loop = true;

                                }
                        }
                        if (res_in)
                                *rinp = res_in;
                        if (res_out)
                                *routp = res_out;
                        if (res_ex)
                                *rexp = res_ex;
                        cond_resched();
                }
                wait->_qproc = NULL;
                if (retval || timed_out || signal_pending(current))
                        break;
                if (table.error) {
                        retval = table.error;
                        break;
                }

                /* only if found POLL_BUSY_LOOP sockets && not out of time */
                if (can_busy_loop && !need_resched()) {
                        if (!busy_end) {
                                busy_end = busy_loop_end_time();
                                continue;
                        }
                        if (!busy_loop_timeout(busy_end))
                                continue;
                }
                busy_flag = 0;

                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
                 * pointer to the expiry value.
                 */
                if (end_time && !to) {
                        expire = timespec_to_ktime(*end_time);
                        to = &expire;
                }

                if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
                                           to, slack))
                        timed_out = 1;
        }

        poll_freewait(&table);

        return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want it to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
                    fd_set __user *exp, struct timespec *end_time)
{
        fd_set_bits fds;
        void *bits;
        int ret, max_fds;
        unsigned int size;
        struct fdtable *fdt;
        /* Allocate small arguments on the stack to save memory and be faster */
        long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

        ret = -EINVAL;
        if (n < 0)
                goto out_nofds;

        /* max_fds can increase, so grab it once to avoid race */
        rcu_read_lock();
        fdt = files_fdtable(current->files);
        max_fds = fdt->max_fds;
        rcu_read_unlock();
        if (n > max_fds)
                n = max_fds;

        /*
         * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
         * since we use the fd_set layout, we allocate memory in units
         * of long-words.
         */
        size = FDS_BYTES(n);
        bits = stack_fds;
        if (size > sizeof(stack_fds) / 6) {
                /* Not enough space in on-stack array; must use kmalloc */
                ret = -ENOMEM;
                bits = kmalloc(6 * size, GFP_KERNEL);
                if (!bits)
                        goto out_nofds;
        }
        fds.in      = bits;
        fds.out     = bits +   size;
        fds.ex      = bits + 2*size;
        fds.res_in  = bits + 3*size;
        fds.res_out = bits + 4*size;
        fds.res_ex  = bits + 5*size;

        if ((ret = get_fd_set(n, inp, fds.in)) ||
            (ret = get_fd_set(n, outp, fds.out)) ||
            (ret = get_fd_set(n, exp, fds.ex)))
                goto out;
        zero_fd_set(n, fds.res_in);
        zero_fd_set(n, fds.res_out);
        zero_fd_set(n, fds.res_ex);

        ret = do_select(n, &fds, end_time);

        if (ret < 0)
                goto out;
        if (!ret) {
                ret = -ERESTARTNOHAND;
                if (signal_pending(current))
                        goto out;
                ret = 0;
        }

        if (set_fd_set(n, inp, fds.res_in) ||
            set_fd_set(n, outp, fds.res_out) ||
            set_fd_set(n, exp, fds.res_ex))
                ret = -EFAULT;

out:
        if (bits != stack_fds)
                kfree(bits);
out_nofds:
        return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct timeval __user *, tvp)
{
        struct timespec end_time, *to = NULL;
        struct timeval tv;
        int ret;

        if (tvp) {
                if (copy_from_user(&tv, tvp, sizeof(tv)))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to,
                                tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
                                (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
                        return -EINVAL;
        }

        ret = core_sys_select(n, inp, outp, exp, to);
        ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

        return ret;
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
                       fd_set __user *exp, struct timespec __user *tsp,
                       const sigset_t __user *sigmask, size_t sigsetsize)
{
        sigset_t ksigmask, sigsaved;
        struct timespec ts, end_time, *to = NULL;
        int ret;

        if (tsp) {
                if (copy_from_user(&ts, tsp, sizeof(ts)))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        if (sigmask) {
                /* XXX: Don't preclude handling different sized sigset_t's. */
                if (sigsetsize != sizeof(sigset_t))
                        return -EINVAL;
                if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
                        return -EFAULT;

                sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }

        ret = core_sys_select(n, inp, outp, exp, to);
        ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

        if (ret == -ERESTARTNOHAND) {
                /*
                 * Don't restore the signal mask yet. Let do_signal() deliver
                 * the signal on the way back to userspace, before the signal
                 * mask is restored.
                 */
                if (sigmask) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                               sizeof(sigsaved));
                        set_restore_sigmask();
                }
        } else if (sigmask)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct timespec __user *, tsp,
                void __user *, sig)
{
        size_t sigsetsize = 0;
        sigset_t __user *up = NULL;

        if (sig) {
                if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
                    || __get_user(up, (sigset_t __user * __user *)sig)
                    || __get_user(sigsetsize,
                                  (size_t __user *)(sig+sizeof(void *))))
                        return -EFAULT;
        }

        return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
        unsigned long n;
        fd_set __user *inp, *outp, *exp;
        struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
        struct poll_list *next;
        int len;
        struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
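
/*
 * Back-of-the-envelope (illustrative, assuming 4 KiB pages, the usual
 * 8-byte struct pollfd and a 16-byte struct poll_list header on a
 * 64-bit build): POLLFD_PER_PAGE = (4096 - 16) / 8 = 510, so each
 * chunk kmalloc'ed by do_sys_poll() below covers up to 510 descriptors.
 */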

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
                                     bool *can_busy_poll,
                                     unsigned int busy_flag)
{
        unsigned int mask;
        int fd;

        mask = 0;
        fd = pollfd->fd;
        if (fd >= 0) {
                struct fd f = fdget(fd);
                mask = POLLNVAL;
                if (f.file) {
                        mask = DEFAULT_POLLMASK;
                        if (f.file->f_op->poll) {
                                pwait->_key = pollfd->events|POLLERR|POLLHUP;
                                pwait->_key |= busy_flag;
                                mask = f.file->f_op->poll(f.file, pwait);
                                if (mask & busy_flag)
                                        *can_busy_poll = true;
                        }
                        /* Mask out unneeded events. */
                        mask &= pollfd->events | POLLERR | POLLHUP;
                        fdput(f);
                }
        }
        pollfd->revents = mask;

        return mask;
}

static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
                   struct timespec *end_time)
{
        poll_table* pt = &wait->pt;
        ktime_t expire, *to = NULL;
        int timed_out = 0, count = 0;
        u64 slack = 0;
        unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
        unsigned long busy_end = 0;

        /* Optimise the no-wait case */
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
                pt->_qproc = NULL;
                timed_out = 1;
        }

        if (end_time && !timed_out)
                slack = select_estimate_accuracy(end_time);

        for (;;) {
                struct poll_list *walk;
                bool can_busy_loop = false;

                for (walk = list; walk != NULL; walk = walk->next) {
                        struct pollfd * pfd, * pfd_end;

                        pfd = walk->entries;
                        pfd_end = pfd + walk->len;
                        for (; pfd != pfd_end; pfd++) {
                                /*
                                 * Fish for events. If we found one, record it
                                 * and kill poll_table->_qproc, so we don't
                                 * needlessly register any other waiters after
                                 * this. They'll get immediately deregistered
                                 * when we break out and return.
                                 */
                                if (do_pollfd(pfd, pt, &can_busy_loop,
                                              busy_flag)) {
                                        count++;
                                        pt->_qproc = NULL;
                                        /* found something, stop busy polling */
                                        busy_flag = 0;
                                        can_busy_loop = false;
                                }
                        }
                }
                /*
                 * All waiters have already been registered, so don't provide
                 * a poll_table->_qproc to them on the next loop iteration.
                 */
                pt->_qproc = NULL;
                if (!count) {
                        count = wait->error;
                        if (signal_pending(current))
                                count = -EINTR;
                }
                if (count || timed_out)
                        break;

                /* only if found POLL_BUSY_LOOP sockets && not out of time */
                if (can_busy_loop && !need_resched()) {
                        if (!busy_end) {
                                busy_end = busy_loop_end_time();
                                continue;
                        }
                        if (!busy_loop_timeout(busy_end))
                                continue;
                }
                busy_flag = 0;

                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
                 * pointer to the expiry value.
                 */
                if (end_time && !to) {
                        expire = timespec_to_ktime(*end_time);
                        to = &expire;
                }

                if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
                        timed_out = 1;
        }
        return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
                        sizeof(struct pollfd))
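
/*
 * Sizing sketch (illustrative, assuming POLL_STACK_ALLOC == 256 as in
 * <linux/poll.h>, plus the 16-byte header and 8-byte pollfd from the
 * example above): N_STACK_PPS = (256 - 16) / 8 = 30, so up to 30
 * descriptors are polled without any heap allocation at all.
 */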

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
                struct timespec *end_time)
{
        struct poll_wqueues table;
        int err = -EFAULT, fdcount, len, size;
        /* Allocate small arguments on the stack to save memory and be
           faster - use long to make sure the buffer is aligned properly
           on 64 bit archs to avoid unaligned access */
        long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
        struct poll_list *const head = (struct poll_list *)stack_pps;
        struct poll_list *walk = head;
        unsigned long todo = nfds;

        if (nfds > rlimit(RLIMIT_NOFILE))
                return -EINVAL;

        len = min_t(unsigned int, nfds, N_STACK_PPS);
        for (;;) {
                walk->next = NULL;
                walk->len = len;
                if (!len)
                        break;

                if (copy_from_user(walk->entries, ufds + nfds-todo,
                                   sizeof(struct pollfd) * walk->len))
                        goto out_fds;

                todo -= walk->len;
                if (!todo)
                        break;

                len = min(todo, POLLFD_PER_PAGE);
                size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
                walk = walk->next = kmalloc(size, GFP_KERNEL);
                if (!walk) {
                        err = -ENOMEM;
                        goto out_fds;
                }
        }

        poll_initwait(&table);
        fdcount = do_poll(head, &table, end_time);
        poll_freewait(&table);

        for (walk = head; walk; walk = walk->next) {
                struct pollfd *fds = walk->entries;
                int j;

                for (j = 0; j < walk->len; j++, ufds++)
                        if (__put_user(fds[j].revents, &ufds->revents))
                                goto out_fds;
        }

        err = fdcount;
out_fds:
        walk = head->next;
        while (walk) {
                struct poll_list *pos = walk;
                walk = walk->next;
                kfree(pos);
        }

        return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
        struct pollfd __user *ufds = restart_block->poll.ufds;
        int nfds = restart_block->poll.nfds;
        struct timespec *to = NULL, end_time;
        int ret;

        if (restart_block->poll.has_timeout) {
                end_time.tv_sec = restart_block->poll.tv_sec;
                end_time.tv_nsec = restart_block->poll.tv_nsec;
                to = &end_time;
        }

        ret = do_sys_poll(ufds, nfds, to);

        if (ret == -EINTR) {
                restart_block->fn = do_restart_poll;
                ret = -ERESTART_RESTARTBLOCK;
        }
        return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
                int, timeout_msecs)
{
        struct timespec end_time, *to = NULL;
        int ret;

        if (timeout_msecs >= 0) {
                to = &end_time;
                poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
                        NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
        }

        ret = do_sys_poll(ufds, nfds, to);

        if (ret == -EINTR) {
                struct restart_block *restart_block;

                restart_block = &current->restart_block;
                restart_block->fn = do_restart_poll;
                restart_block->poll.ufds = ufds;
                restart_block->poll.nfds = nfds;

                if (timeout_msecs >= 0) {
                        restart_block->poll.tv_sec = end_time.tv_sec;
                        restart_block->poll.tv_nsec = end_time.tv_nsec;
                        restart_block->poll.has_timeout = 1;
                } else
                        restart_block->poll.has_timeout = 0;

                ret = -ERESTART_RESTARTBLOCK;
        }
        return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
                struct timespec __user *, tsp, const sigset_t __user *, sigmask,
                size_t, sigsetsize)
{
        sigset_t ksigmask, sigsaved;
        struct timespec ts, end_time, *to = NULL;
        int ret;

        if (tsp) {
                if (copy_from_user(&ts, tsp, sizeof(ts)))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        if (sigmask) {
                /* XXX: Don't preclude handling different sized sigset_t's. */
                if (sigsetsize != sizeof(sigset_t))
                        return -EINVAL;
                if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
                        return -EFAULT;

                sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }

        ret = do_sys_poll(ufds, nfds, to);

        /* We can restart this syscall, usually */
        if (ret == -EINTR) {
                /*
                 * Don't restore the signal mask yet. Let do_signal() deliver
                 * the signal on the way back to userspace, before the signal
                 * mask is restored.
                 */
                if (sigmask) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                               sizeof(sigsaved));
                        set_restore_sigmask();
                }
                ret = -ERESTARTNOHAND;
        } else if (sigmask)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

        return ret;
}
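
/*
 * Userspace sketch (illustrative, not kernel code): the sigmask
 * handling above is what lets ppoll() unblock a signal and wait
 * atomically.  The racy alternative, sigprocmask() followed by poll(),
 * can miss a signal delivered between the two calls:
 *
 *	sigset_t mask;			// signals to allow while waiting
 *	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN };
 *
 *	sigemptyset(&mask);
 *	if (ppoll(&pfd, 1, NULL, &mask) < 0 && errno == EINTR)
 *		handle_signal();	// hypothetical handler; sock_fd too
 */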
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * This file contains the procedures for the handling of select and poll
4 *
5 * Created for Linux based loosely upon Mathius Lattner's minix
6 * patches by Peter MacDonald. Heavily edited by Linus.
7 *
8 * 4 February 1994
9 * COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
10 * flag set in its personality we do *not* modify the given timeout
11 * parameter to reflect time remaining.
12 *
13 * 24 January 2000
14 * Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
15 * of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
16 */
17
18#include <linux/kernel.h>
19#include <linux/sched/signal.h>
20#include <linux/sched/rt.h>
21#include <linux/syscalls.h>
22#include <linux/export.h>
23#include <linux/slab.h>
24#include <linux/poll.h>
25#include <linux/personality.h> /* for STICKY_TIMEOUTS */
26#include <linux/file.h>
27#include <linux/fdtable.h>
28#include <linux/fs.h>
29#include <linux/rcupdate.h>
30#include <linux/hrtimer.h>
31#include <linux/freezer.h>
32#include <net/busy_poll.h>
33#include <linux/vmalloc.h>
34
35#include <linux/uaccess.h>
36
37
38/*
39 * Estimate expected accuracy in ns from a timeval.
40 *
41 * After quite a bit of churning around, we've settled on
42 * a simple thing of taking 0.1% of the timeout as the
43 * slack, with a cap of 100 msec.
44 * "nice" tasks get a 0.5% slack instead.
45 *
46 * Consider this comment an open invitation to come up with even
47 * better solutions..
48 */
49
50#define MAX_SLACK (100 * NSEC_PER_MSEC)
51
52static long __estimate_accuracy(struct timespec64 *tv)
53{
54 long slack;
55 int divfactor = 1000;
56
57 if (tv->tv_sec < 0)
58 return 0;
59
60 if (task_nice(current) > 0)
61 divfactor = divfactor / 5;
62
63 if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
64 return MAX_SLACK;
65
66 slack = tv->tv_nsec / divfactor;
67 slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);
68
69 if (slack > MAX_SLACK)
70 return MAX_SLACK;
71
72 return slack;
73}
74
75u64 select_estimate_accuracy(struct timespec64 *tv)
76{
77 u64 ret;
78 struct timespec64 now;
79
80 /*
81 * Realtime tasks get a slack of 0 for obvious reasons.
82 */
83
84 if (rt_task(current))
85 return 0;
86
87 ktime_get_ts64(&now);
88 now = timespec64_sub(*tv, now);
89 ret = __estimate_accuracy(&now);
90 if (ret < current->timer_slack_ns)
91 return current->timer_slack_ns;
92 return ret;
93}
94
95
96
97struct poll_table_page {
98 struct poll_table_page * next;
99 struct poll_table_entry * entry;
100 struct poll_table_entry entries[0];
101};
102
103#define POLL_TABLE_FULL(table) \
104 ((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
105
106/*
107 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
108 * I have rewritten this, taking some shortcuts: This code may not be easy to
109 * follow, but it should be free of race-conditions, and it's practical. If you
110 * understand what I'm doing here, then you understand how the linux
111 * sleep/wakeup mechanism works.
112 *
113 * Two very simple procedures, poll_wait() and poll_freewait() make all the
114 * work. poll_wait() is an inline-function defined in <linux/poll.h>,
115 * as all select/poll functions have to call it to add an entry to the
116 * poll table.
117 */
118static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
119 poll_table *p);
120
121void poll_initwait(struct poll_wqueues *pwq)
122{
123 init_poll_funcptr(&pwq->pt, __pollwait);
124 pwq->polling_task = current;
125 pwq->triggered = 0;
126 pwq->error = 0;
127 pwq->table = NULL;
128 pwq->inline_index = 0;
129}
130EXPORT_SYMBOL(poll_initwait);
131
132static void free_poll_entry(struct poll_table_entry *entry)
133{
134 remove_wait_queue(entry->wait_address, &entry->wait);
135 fput(entry->filp);
136}
137
138void poll_freewait(struct poll_wqueues *pwq)
139{
140 struct poll_table_page * p = pwq->table;
141 int i;
142 for (i = 0; i < pwq->inline_index; i++)
143 free_poll_entry(pwq->inline_entries + i);
144 while (p) {
145 struct poll_table_entry * entry;
146 struct poll_table_page *old;
147
148 entry = p->entry;
149 do {
150 entry--;
151 free_poll_entry(entry);
152 } while (entry > p->entries);
153 old = p;
154 p = p->next;
155 free_page((unsigned long) old);
156 }
157}
158EXPORT_SYMBOL(poll_freewait);
159
160static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
161{
162 struct poll_table_page *table = p->table;
163
164 if (p->inline_index < N_INLINE_POLL_ENTRIES)
165 return p->inline_entries + p->inline_index++;
166
167 if (!table || POLL_TABLE_FULL(table)) {
168 struct poll_table_page *new_table;
169
170 new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
171 if (!new_table) {
172 p->error = -ENOMEM;
173 return NULL;
174 }
175 new_table->entry = new_table->entries;
176 new_table->next = table;
177 p->table = new_table;
178 table = new_table;
179 }
180
181 return table->entry++;
182}
183
184static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
185{
186 struct poll_wqueues *pwq = wait->private;
187 DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);
188
189 /*
190 * Although this function is called under waitqueue lock, LOCK
191 * doesn't imply write barrier and the users expect write
192 * barrier semantics on wakeup functions. The following
193 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
194 * and is paired with smp_store_mb() in poll_schedule_timeout.
195 */
196 smp_wmb();
197 pwq->triggered = 1;
198
199 /*
200 * Perform the default wake up operation using a dummy
201 * waitqueue.
202 *
203 * TODO: This is hacky but there currently is no interface to
204 * pass in @sync. @sync is scheduled to be removed and once
205 * that happens, wake_up_process() can be used directly.
206 */
207 return default_wake_function(&dummy_wait, mode, sync, key);
208}
209
210static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
211{
212 struct poll_table_entry *entry;
213
214 entry = container_of(wait, struct poll_table_entry, wait);
215 if (key && !(key_to_poll(key) & entry->key))
216 return 0;
217 return __pollwake(wait, mode, sync, key);
218}
219
220/* Add a new entry */
221static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
222 poll_table *p)
223{
224 struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
225 struct poll_table_entry *entry = poll_get_entry(pwq);
226 if (!entry)
227 return;
228 entry->filp = get_file(filp);
229 entry->wait_address = wait_address;
230 entry->key = p->_key;
231 init_waitqueue_func_entry(&entry->wait, pollwake);
232 entry->wait.private = pwq;
233 add_wait_queue(wait_address, &entry->wait);
234}
235
236int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
237 ktime_t *expires, unsigned long slack)
238{
239 int rc = -EINTR;
240
241 set_current_state(state);
242 if (!pwq->triggered)
243 rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
244 __set_current_state(TASK_RUNNING);
245
246 /*
247 * Prepare for the next iteration.
248 *
249 * The following smp_store_mb() serves two purposes. First, it's
250 * the counterpart rmb of the wmb in pollwake() such that data
251 * written before wake up is always visible after wake up.
252 * Second, the full barrier guarantees that triggered clearing
253 * doesn't pass event check of the next iteration. Note that
254 * this problem doesn't exist for the first iteration as
255 * add_wait_queue() has full barrier semantics.
256 */
257 smp_store_mb(pwq->triggered, 0);
258
259 return rc;
260}
261EXPORT_SYMBOL(poll_schedule_timeout);
262
263/**
264 * poll_select_set_timeout - helper function to setup the timeout value
265 * @to: pointer to timespec64 variable for the final timeout
266 * @sec: seconds (from user space)
267 * @nsec: nanoseconds (from user space)
268 *
269 * Note, we do not use a timespec for the user space value here, That
270 * way we can use the function for timeval and compat interfaces as well.
271 *
272 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
273 */
274int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
275{
276 struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};
277
278 if (!timespec64_valid(&ts))
279 return -EINVAL;
280
281 /* Optimize for the zero timeout value here */
282 if (!sec && !nsec) {
283 to->tv_sec = to->tv_nsec = 0;
284 } else {
285 ktime_get_ts64(to);
286 *to = timespec64_add_safe(*to, ts);
287 }
288 return 0;
289}
290
291static int poll_select_copy_remaining(struct timespec64 *end_time,
292 void __user *p,
293 int timeval, int ret)
294{
295 struct timespec64 rts;
296 struct timeval rtv;
297
298 if (!p)
299 return ret;
300
301 if (current->personality & STICKY_TIMEOUTS)
302 goto sticky;
303
304 /* No update for zero timeout */
305 if (!end_time->tv_sec && !end_time->tv_nsec)
306 return ret;
307
308 ktime_get_ts64(&rts);
309 rts = timespec64_sub(*end_time, rts);
310 if (rts.tv_sec < 0)
311 rts.tv_sec = rts.tv_nsec = 0;
312
313
314 if (timeval) {
315 if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
316 memset(&rtv, 0, sizeof(rtv));
317 rtv.tv_sec = rts.tv_sec;
318 rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
319
320 if (!copy_to_user(p, &rtv, sizeof(rtv)))
321 return ret;
322
323 } else if (!put_timespec64(&rts, p))
324 return ret;
325
326 /*
327 * If an application puts its timeval in read-only memory, we
328 * don't want the Linux-specific update to the timeval to
329 * cause a fault after the select has completed
330 * successfully. However, because we're not updating the
331 * timeval, we can't restart the system call.
332 */
333
334sticky:
335 if (ret == -ERESTARTNOHAND)
336 ret = -EINTR;
337 return ret;
338}
339
340/*
341 * Scalable version of the fd_set.
342 */
343
344typedef struct {
345 unsigned long *in, *out, *ex;
346 unsigned long *res_in, *res_out, *res_ex;
347} fd_set_bits;
348
349/*
350 * How many longwords for "nr" bits?
351 */
352#define FDS_BITPERLONG (8*sizeof(long))
353#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
354#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))
355
356/*
357 * We do a VERIFY_WRITE here even though we are only reading this time:
358 * we'll write to it eventually..
359 *
360 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
361 */
362static inline
363int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
364{
365 nr = FDS_BYTES(nr);
366 if (ufdset)
367 return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;
368
369 memset(fdset, 0, nr);
370 return 0;
371}
372
373static inline unsigned long __must_check
374set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
375{
376 if (ufdset)
377 return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
378 return 0;
379}
380
381static inline
382void zero_fd_set(unsigned long nr, unsigned long *fdset)
383{
384 memset(fdset, 0, FDS_BYTES(nr));
385}
386
387#define FDS_IN(fds, n) (fds->in + n)
388#define FDS_OUT(fds, n) (fds->out + n)
389#define FDS_EX(fds, n) (fds->ex + n)
390
391#define BITS(fds, n) (*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))
392
393static int max_select_fd(unsigned long n, fd_set_bits *fds)
394{
395 unsigned long *open_fds;
396 unsigned long set;
397 int max;
398 struct fdtable *fdt;
399
400 /* handle last in-complete long-word first */
401 set = ~(~0UL << (n & (BITS_PER_LONG-1)));
402 n /= BITS_PER_LONG;
403 fdt = files_fdtable(current->files);
404 open_fds = fdt->open_fds + n;
405 max = 0;
406 if (set) {
407 set &= BITS(fds, n);
408 if (set) {
409 if (!(set & ~*open_fds))
410 goto get_max;
411 return -EBADF;
412 }
413 }
414 while (n) {
415 open_fds--;
416 n--;
417 set = BITS(fds, n);
418 if (!set)
419 continue;
420 if (set & ~*open_fds)
421 return -EBADF;
422 if (max)
423 continue;
424get_max:
425 do {
426 max++;
427 set >>= 1;
428 } while (set);
429 max += n * BITS_PER_LONG;
430 }
431
432 return max;
433}
434
435#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR)
436#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR)
437#define POLLEX_SET (EPOLLPRI)
438
439static inline void wait_key_set(poll_table *wait, unsigned long in,
440 unsigned long out, unsigned long bit,
441 __poll_t ll_flag)
442{
443 wait->_key = POLLEX_SET | ll_flag;
444 if (in & bit)
445 wait->_key |= POLLIN_SET;
446 if (out & bit)
447 wait->_key |= POLLOUT_SET;
448}
449
450static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
451{
452 ktime_t expire, *to = NULL;
453 struct poll_wqueues table;
454 poll_table *wait;
455 int retval, i, timed_out = 0;
456 u64 slack = 0;
457 __poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
458 unsigned long busy_start = 0;
459
460 rcu_read_lock();
461 retval = max_select_fd(n, fds);
462 rcu_read_unlock();
463
464 if (retval < 0)
465 return retval;
466 n = retval;
467
468 poll_initwait(&table);
469 wait = &table.pt;
470 if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
471 wait->_qproc = NULL;
472 timed_out = 1;
473 }
474
475 if (end_time && !timed_out)
476 slack = select_estimate_accuracy(end_time);
477
478 retval = 0;
479 for (;;) {
480 unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
481 bool can_busy_loop = false;
482
483 inp = fds->in; outp = fds->out; exp = fds->ex;
484 rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
485
486 for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
487 unsigned long in, out, ex, all_bits, bit = 1, j;
488 unsigned long res_in = 0, res_out = 0, res_ex = 0;
489 __poll_t mask;
490
491 in = *inp++; out = *outp++; ex = *exp++;
492 all_bits = in | out | ex;
493 if (all_bits == 0) {
494 i += BITS_PER_LONG;
495 continue;
496 }
497
498 for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
499 struct fd f;
500 if (i >= n)
501 break;
502 if (!(bit & all_bits))
503 continue;
504 f = fdget(i);
505 if (f.file) {
506 const struct file_operations *f_op;
507 f_op = f.file->f_op;
508 mask = DEFAULT_POLLMASK;
509 if (f_op->poll) {
510 wait_key_set(wait, in, out,
511 bit, busy_flag);
512 mask = (*f_op->poll)(f.file, wait);
513 }
514 fdput(f);
515 if ((mask & POLLIN_SET) && (in & bit)) {
516 res_in |= bit;
517 retval++;
518 wait->_qproc = NULL;
519 }
520 if ((mask & POLLOUT_SET) && (out & bit)) {
521 res_out |= bit;
522 retval++;
523 wait->_qproc = NULL;
524 }
525 if ((mask & POLLEX_SET) && (ex & bit)) {
526 res_ex |= bit;
527 retval++;
528 wait->_qproc = NULL;
529 }
530 /* got something, stop busy polling */
531 if (retval) {
532 can_busy_loop = false;
533 busy_flag = 0;
534
535 /*
536 * only remember a returned
537 * POLL_BUSY_LOOP if we asked for it
538 */
539 } else if (busy_flag & mask)
540 can_busy_loop = true;
541
542 }
543 }
544 if (res_in)
545 *rinp = res_in;
546 if (res_out)
547 *routp = res_out;
548 if (res_ex)
549 *rexp = res_ex;
550 cond_resched();
551 }
552 wait->_qproc = NULL;
553 if (retval || timed_out || signal_pending(current))
554 break;
555 if (table.error) {
556 retval = table.error;
557 break;
558 }
559
560 /* only if found POLL_BUSY_LOOP sockets && not out of time */
561 if (can_busy_loop && !need_resched()) {
562 if (!busy_start) {
563 busy_start = busy_loop_current_time();
564 continue;
565 }
566 if (!busy_loop_timeout(busy_start))
567 continue;
568 }
569 busy_flag = 0;
570
571 /*
572 * If this is the first loop and we have a timeout
573 * given, then we convert to ktime_t and set the to
574 * pointer to the expiry value.
575 */
576 if (end_time && !to) {
577 expire = timespec64_to_ktime(*end_time);
578 to = &expire;
579 }
580
581 if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
582 to, slack))
583 timed_out = 1;
584 }
585
586 poll_freewait(&table);
587
588 return retval;
589}
590
591/*
592 * We can actually return ERESTARTSYS instead of EINTR, but I'd
593 * like to be certain this leads to no problems. So I return
594 * EINTR just for safety.
595 *
596 * Update: ERESTARTSYS breaks at least the xview clock binary, so
597 * I'm trying ERESTARTNOHAND which restart only when you want to.
598 */
599int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
600 fd_set __user *exp, struct timespec64 *end_time)
601{
602 fd_set_bits fds;
603 void *bits;
604 int ret, max_fds;
605 size_t size, alloc_size;
606 struct fdtable *fdt;
607 /* Allocate small arguments on the stack to save memory and be faster */
608 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
609
610 ret = -EINVAL;
611 if (n < 0)
612 goto out_nofds;
613
614 /* max_fds can increase, so grab it once to avoid race */
615 rcu_read_lock();
616 fdt = files_fdtable(current->files);
617 max_fds = fdt->max_fds;
618 rcu_read_unlock();
619 if (n > max_fds)
620 n = max_fds;
621
622 /*
623 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
624 * since we used fdset we need to allocate memory in units of
625 * long-words.
626 */
627 size = FDS_BYTES(n);
628 bits = stack_fds;
629 if (size > sizeof(stack_fds) / 6) {
630 /* Not enough space in on-stack array; must use kmalloc */
631 ret = -ENOMEM;
632 if (size > (SIZE_MAX / 6))
633 goto out_nofds;
634
635 alloc_size = 6 * size;
636 bits = kvmalloc(alloc_size, GFP_KERNEL);
637 if (!bits)
638 goto out_nofds;
639 }
640 fds.in = bits;
641 fds.out = bits + size;
642 fds.ex = bits + 2*size;
643 fds.res_in = bits + 3*size;
644 fds.res_out = bits + 4*size;
645 fds.res_ex = bits + 5*size;
646
647 if ((ret = get_fd_set(n, inp, fds.in)) ||
648 (ret = get_fd_set(n, outp, fds.out)) ||
649 (ret = get_fd_set(n, exp, fds.ex)))
650 goto out;
651 zero_fd_set(n, fds.res_in);
652 zero_fd_set(n, fds.res_out);
653 zero_fd_set(n, fds.res_ex);
654
655 ret = do_select(n, &fds, end_time);
656
657 if (ret < 0)
658 goto out;
659 if (!ret) {
660 ret = -ERESTARTNOHAND;
661 if (signal_pending(current))
662 goto out;
663 ret = 0;
664 }
665
666 if (set_fd_set(n, inp, fds.res_in) ||
667 set_fd_set(n, outp, fds.res_out) ||
668 set_fd_set(n, exp, fds.res_ex))
669 ret = -EFAULT;
670
671out:
672 if (bits != stack_fds)
673 kvfree(bits);
674out_nofds:
675 return ret;
676}
677
678static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
679 fd_set __user *exp, struct timeval __user *tvp)
680{
681 struct timespec64 end_time, *to = NULL;
682 struct timeval tv;
683 int ret;
684
685 if (tvp) {
686 if (copy_from_user(&tv, tvp, sizeof(tv)))
687 return -EFAULT;
688
689 to = &end_time;
690 if (poll_select_set_timeout(to,
691 tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
692 (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
693 return -EINVAL;
694 }
695
696 ret = core_sys_select(n, inp, outp, exp, to);
697 ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);
698
699 return ret;
700}
701
702SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
703 fd_set __user *, exp, struct timeval __user *, tvp)
704{
705 return kern_select(n, inp, outp, exp, tvp);
706}
707
708static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
709 fd_set __user *exp, struct timespec __user *tsp,
710 const sigset_t __user *sigmask, size_t sigsetsize)
711{
712 sigset_t ksigmask, sigsaved;
713 struct timespec64 ts, end_time, *to = NULL;
714 int ret;
715
716 if (tsp) {
717 if (get_timespec64(&ts, tsp))
718 return -EFAULT;
719
720 to = &end_time;
721 if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
722 return -EINVAL;
723 }
724
725 if (sigmask) {
726 /* XXX: Don't preclude handling different sized sigset_t's. */
727 if (sigsetsize != sizeof(sigset_t))
728 return -EINVAL;
729 if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
730 return -EFAULT;
731
732 sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
733 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
734 }
735
736 ret = core_sys_select(n, inp, outp, exp, to);
737 ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);
738
739 if (ret == -ERESTARTNOHAND) {
740 /*
741 * Don't restore the signal mask yet. Let do_signal() deliver
742 * the signal on the way back to userspace, before the signal
743 * mask is restored.
744 */
745 if (sigmask) {
746 memcpy(¤t->saved_sigmask, &sigsaved,
747 sizeof(sigsaved));
748 set_restore_sigmask();
749 }
750 } else if (sigmask)
751 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
752
753 return ret;
754}
755
756/*
757 * Most architectures can't handle 7-argument syscalls. So we provide a
758 * 6-argument version where the sixth argument is a pointer to a structure
759 * which has a pointer to the sigset_t itself followed by a size_t containing
760 * the sigset size.
761 */
762SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
763 fd_set __user *, exp, struct timespec __user *, tsp,
764 void __user *, sig)
765{
766 size_t sigsetsize = 0;
767 sigset_t __user *up = NULL;
768
769 if (sig) {
770 if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
771 || __get_user(up, (sigset_t __user * __user *)sig)
772 || __get_user(sigsetsize,
773 (size_t __user *)(sig+sizeof(void *))))
774 return -EFAULT;
775 }
776
777 return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
778}
779
780#ifdef __ARCH_WANT_SYS_OLD_SELECT
781struct sel_arg_struct {
782 unsigned long n;
783 fd_set __user *inp, *outp, *exp;
784 struct timeval __user *tvp;
785};
786
787SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
788{
789 struct sel_arg_struct a;
790
791 if (copy_from_user(&a, arg, sizeof(a)))
792 return -EFAULT;
793 return kern_select(a.n, a.inp, a.outp, a.exp, a.tvp);
794}
795#endif
796
797struct poll_list {
798 struct poll_list *next;
799 int len;
800 struct pollfd entries[0];
801};
802
803#define POLLFD_PER_PAGE ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
804
805/*
806 * Fish for pollable events on the pollfd->fd file descriptor. We're only
807 * interested in events matching the pollfd->events mask, and the result
808 * matching that mask is both recorded in pollfd->revents and returned. The
809 * pwait poll_table will be used by the fd-provided poll handler for waiting,
810 * if pwait->_qproc is non-NULL.
811 */
812static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
813 bool *can_busy_poll,
814 __poll_t busy_flag)
815{
816 __poll_t mask;
817 int fd;
818
819 mask = 0;
820 fd = pollfd->fd;
821 if (fd >= 0) {
822 struct fd f = fdget(fd);
823 mask = EPOLLNVAL;
824 if (f.file) {
825 /* userland u16 ->events contains POLL... bitmap */
826 __poll_t filter = demangle_poll(pollfd->events) |
827 EPOLLERR | EPOLLHUP;
828 mask = DEFAULT_POLLMASK;
829 if (f.file->f_op->poll) {
830 pwait->_key = filter;
831 pwait->_key |= busy_flag;
832 mask = f.file->f_op->poll(f.file, pwait);
833 if (mask & busy_flag)
834 *can_busy_poll = true;
835 }
836 /* Mask out unneeded events. */
837 mask &= filter;
838 fdput(f);
839 }
840 }
841 /* ... and so does ->revents */
842 pollfd->revents = mangle_poll(mask);
843
844 return mask;
845}
846
847static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
848 struct timespec64 *end_time)
849{
850 poll_table* pt = &wait->pt;
851 ktime_t expire, *to = NULL;
852 int timed_out = 0, count = 0;
853 u64 slack = 0;
854 __poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
855 unsigned long busy_start = 0;
856
857 /* Optimise the no-wait case */
858 if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
859 pt->_qproc = NULL;
860 timed_out = 1;
861 }
862
863 if (end_time && !timed_out)
864 slack = select_estimate_accuracy(end_time);
865
866 for (;;) {
867 struct poll_list *walk;
868 bool can_busy_loop = false;
869
870 for (walk = list; walk != NULL; walk = walk->next) {
871 struct pollfd * pfd, * pfd_end;
872
873 pfd = walk->entries;
874 pfd_end = pfd + walk->len;
875 for (; pfd != pfd_end; pfd++) {
876 /*
877 * Fish for events. If we found one, record it
878 * and kill poll_table->_qproc, so we don't
879 * needlessly register any other waiters after
880 * this. They'll get immediately deregistered
881 * when we break out and return.
882 */
883 if (do_pollfd(pfd, pt, &can_busy_loop,
884 busy_flag)) {
885 count++;
886 pt->_qproc = NULL;
887 /* found something, stop busy polling */
888 busy_flag = 0;
889 can_busy_loop = false;
890 }
891 }
892 }
893 /*
894 * All waiters have already been registered, so don't provide
895 * a poll_table->_qproc to them on the next loop iteration.
896 */
897 pt->_qproc = NULL;
898 if (!count) {
899 count = wait->error;
900 if (signal_pending(current))
901 count = -EINTR;
902 }
903 if (count || timed_out)
904 break;
905
906 /* only if found POLL_BUSY_LOOP sockets && not out of time */
907 if (can_busy_loop && !need_resched()) {
908 if (!busy_start) {
909 busy_start = busy_loop_current_time();
910 continue;
911 }
912 if (!busy_loop_timeout(busy_start))
913 continue;
914 }
915 busy_flag = 0;
916
917 /*
918 * If this is the first loop and we have a timeout
919 * given, then we convert to ktime_t and set the to
920 * pointer to the expiry value.
921 */
922 if (end_time && !to) {
923 expire = timespec64_to_ktime(*end_time);
924 to = &expire;
925 }
926
927 if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
928 timed_out = 1;
929 }
930 return count;
931}
932
933#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
934 sizeof(struct pollfd))
935
936static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
937 struct timespec64 *end_time)
938{
939 struct poll_wqueues table;
940 int err = -EFAULT, fdcount, len, size;
941 /* Allocate small arguments on the stack to save memory and be
942 faster - use long to make sure the buffer is aligned properly
943 on 64 bit archs to avoid unaligned access */
944 long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
945 struct poll_list *const head = (struct poll_list *)stack_pps;
946 struct poll_list *walk = head;
947 unsigned long todo = nfds;
948
949 if (nfds > rlimit(RLIMIT_NOFILE))
950 return -EINVAL;
951
952 len = min_t(unsigned int, nfds, N_STACK_PPS);
953 for (;;) {
954 walk->next = NULL;
955 walk->len = len;
956 if (!len)
957 break;
958
959 if (copy_from_user(walk->entries, ufds + nfds-todo,
960 sizeof(struct pollfd) * walk->len))
961 goto out_fds;
962
963 todo -= walk->len;
964 if (!todo)
965 break;
966
967 len = min(todo, POLLFD_PER_PAGE);
968 size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
969 walk = walk->next = kmalloc(size, GFP_KERNEL);
970 if (!walk) {
971 err = -ENOMEM;
972 goto out_fds;
973 }
974 }
975
976 poll_initwait(&table);
977 fdcount = do_poll(head, &table, end_time);
978 poll_freewait(&table);
979
980 for (walk = head; walk; walk = walk->next) {
981 struct pollfd *fds = walk->entries;
982 int j;
983
984 for (j = 0; j < walk->len; j++, ufds++)
985 if (__put_user(fds[j].revents, &ufds->revents))
986 goto out_fds;
987 }
988
989 err = fdcount;
990out_fds:
991 walk = head->next;
992 while (walk) {
993 struct poll_list *pos = walk;
994 walk = walk->next;
995 kfree(pos);
996 }
997
998 return err;
999}
1000
1001static long do_restart_poll(struct restart_block *restart_block)
1002{
1003 struct pollfd __user *ufds = restart_block->poll.ufds;
1004 int nfds = restart_block->poll.nfds;
1005 struct timespec64 *to = NULL, end_time;
1006 int ret;
1007
1008 if (restart_block->poll.has_timeout) {
1009 end_time.tv_sec = restart_block->poll.tv_sec;
1010 end_time.tv_nsec = restart_block->poll.tv_nsec;
1011 to = &end_time;
1012 }
1013
1014 ret = do_sys_poll(ufds, nfds, to);
1015
1016 if (ret == -EINTR) {
1017 restart_block->fn = do_restart_poll;
1018 ret = -ERESTART_RESTARTBLOCK;
1019 }
1020 return ret;
1021}
1022
1023SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
1024 int, timeout_msecs)
1025{
1026 struct timespec64 end_time, *to = NULL;
1027 int ret;
1028
1029 if (timeout_msecs >= 0) {
1030 to = &end_time;
1031 poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
1032 NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
1033 }
1034
1035 ret = do_sys_poll(ufds, nfds, to);
1036
1037 if (ret == -EINTR) {
1038 struct restart_block *restart_block;
1039
1040 restart_block = ¤t->restart_block;
1041 restart_block->fn = do_restart_poll;
1042 restart_block->poll.ufds = ufds;
1043 restart_block->poll.nfds = nfds;
1044
1045 if (timeout_msecs >= 0) {
1046 restart_block->poll.tv_sec = end_time.tv_sec;
1047 restart_block->poll.tv_nsec = end_time.tv_nsec;
1048 restart_block->poll.has_timeout = 1;
1049 } else
1050 restart_block->poll.has_timeout = 0;
1051
1052 ret = -ERESTART_RESTARTBLOCK;
1053 }
1054 return ret;
1055}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
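/*
 * Illustrative userspace usage (not kernel code): ppoll() installs the
 * caller's signal mask atomically around the wait, which closes exactly
 * the race a sigprocmask();poll();sigprocmask() sequence leaves open.
 *
 *	#define _GNU_SOURCE
 *	#include <poll.h>
 *	#include <signal.h>
 *
 *	static int wait_with_sigint_blocked(struct pollfd *pfd, nfds_t n)
 *	{
 *		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *		sigset_t mask;
 *
 *		sigemptyset(&mask);
 *		sigaddset(&mask, SIGINT);
 *		// SIGINT is blocked only for the duration of the sleep;
 *		// the old mask is restored before ppoll() returns.
 *		return ppoll(pfd, n, &ts, &mask);
 *	}
 */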

#ifdef CONFIG_COMPAT
#define __COMPAT_NFDBITS (8 * sizeof(compat_ulong_t))

static
int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec64 ts;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&ts);
	ts = timespec64_sub(*end_time, ts);
	if (ts.tv_sec < 0)
		ts.tv_sec = ts.tv_nsec = 0;

	if (timeval) {
		struct compat_timeval rtv;

		rtv.tv_sec = ts.tv_sec;
		rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;
	} else {
		if (!compat_put_timespec64(&ts, p))
			return ret;
	}
	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}
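/*
 * Worked example (illustrative values): with *end_time = { 100, 250000000 }
 * and ktime_get_ts64() reading { 99, 900000000 }, timespec64_sub() leaves
 * ts = { 0, 350000000 }, so the timeval case writes back
 * { tv_sec = 0, tv_usec = 350000 }.  A task with STICKY_TIMEOUTS set in
 * its personality jumps straight to sticky and gets no write-back at all.
 */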

/*
 * Ooo, nasty. Here we need to frob 32-bit unsigned longs into
 * 64-bit unsigned longs.
 */
static
int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
			unsigned long *fdset)
{
	if (ufdset) {
		return compat_get_bitmap(fdset, ufdset, nr);
	} else {
		zero_fd_set(nr, fdset);
		return 0;
	}
}

static
int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
			unsigned long *fdset)
{
	if (!ufdset)
		return 0;
	return compat_put_bitmap(ufdset, fdset, nr);
}
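/*
 * Illustrative sketch (not the real compat_get_bitmap(), which also has
 * to cope with big-endian layouts): on a little-endian 64-bit kernel the
 * "frobbing" amounts to packing two 32-bit compat words into one native
 * long, low word first.
 *
 *	static inline unsigned long pack_compat_words(unsigned int lo,
 *						      unsigned int hi)
 *	{
 *		return ((unsigned long)hi << 32) | lo;
 *	}
 *
 * So a compat fd_set with fd 0 and fd 63 set - words { 0x00000001,
 * 0x80000000 } - becomes the single 64-bit word 0x8000000000000001.
 */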


/*
 * This is a virtual copy of sys_select from fs/select.c and should
 * probably be compared to it from time to time.
 */

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want it to.
 */
static int compat_core_sys_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int size, max_fds, ret = -EINVAL;
	struct fdtable *fdt;
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we use fdsets we need to allocate memory in units of
	 * long-words.
	 */
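	/*
	 * Illustrative layout (example values): for n = 100 on a 64-bit
	 * kernel, FDS_BYTES(100) rounds 100 bits up to two longs, so
	 * size = 16 and all six bitmaps share one 96-byte buffer:
	 *
	 *	bits + 0*size: in	bits + 3*size: res_in
	 *	bits + 1*size: out	bits + 4*size: res_out
	 *	bits + 2*size: ex	bits + 5*size: res_ex
	 *
	 * which is why stack_fds is only used when
	 * size <= sizeof(stack_fds) / 6.
	 */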
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		bits = kmalloc(6 * size, GFP_KERNEL);
		ret = -ENOMEM;
		if (!bits)
			goto out_nofds;
	}
	fds.in = (unsigned long *) bits;
	fds.out = (unsigned long *) (bits + size);
	fds.ex = (unsigned long *) (bits + 2*size);
	fds.res_in = (unsigned long *) (bits + 3*size);
	fds.res_out = (unsigned long *) (bits + 4*size);
	fds.res_ex = (unsigned long *) (bits + 5*size);

	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
	    (ret = compat_get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (compat_set_fd_set(n, inp, fds.res_in) ||
	    compat_set_fd_set(n, outp, fds.res_out) ||
	    compat_set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;
out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

static int do_compat_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct compat_timeval __user *tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct compat_timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	ret = compat_poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct compat_timeval __user *, tvp)
{
	return do_compat_select(n, inp, outp, exp, tvp);
}

struct compat_sel_arg_struct {
	compat_ulong_t n;
	compat_uptr_t inp;
	compat_uptr_t outp;
	compat_uptr_t exp;
	compat_uptr_t tvp;
};

COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
{
	struct compat_sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return do_compat_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
				compat_ptr(a.exp), compat_ptr(a.tvp));
}

static long do_compat_pselect(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
	compat_size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (compat_get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (get_compat_sigset(&ksigmask, sigmask))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct compat_timespec __user *, tsp, void __user *, sig)
{
	compat_size_t sigsetsize = 0;
	compat_uptr_t up = 0;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig,
				sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
		    __get_user(up, (compat_uptr_t __user *)sig) ||
		    __get_user(sigsetsize,
				(compat_size_t __user *)(sig+sizeof(up))))
			return -EFAULT;
	}
	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
				 sigsetsize);
}
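/*
 * Illustrative userspace layout (not kernel code): the sixth pselect6()
 * argument points at a pointer/size pair, which the two __get_user()
 * calls above unpack.  A 32-bit caller passes something shaped like:
 *
 *	struct {
 *		compat_uptr_t ss;	// user pointer to the sigset
 *		compat_size_t ss_len;	// its size in bytes
 *	} sig_arg;
 *
 * libc wrappers typically build this pair internally when a program
 * calls pselect().
 */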

COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
	unsigned int, nfds, struct compat_timespec __user *, tsp,
	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (compat_get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (get_compat_sigset(&ksigmask, sigmask))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
				sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
#endif