fs/select.c (Linux kernel v4.10.11)
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome the nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timespec64.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK        (100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
        long slack;
        int divfactor = 1000;

        if (tv->tv_sec < 0)
                return 0;

        if (task_nice(current) > 0)
                divfactor = divfactor / 5;

        if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
                return MAX_SLACK;

        slack = tv->tv_nsec / divfactor;
        slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

        if (slack > MAX_SLACK)
                return MAX_SLACK;

        return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
        u64 ret;
        struct timespec64 now;

        /*
         * Realtime tasks get a slack of 0 for obvious reasons.
         */

        if (rt_task(current))
                return 0;

        ktime_get_ts64(&now);
        now = timespec64_sub(*tv, now);
        ret = __estimate_accuracy(&now);
        if (ret < current->timer_slack_ns)
                return current->timer_slack_ns;
        return ret;
}
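
/*
 * Worked example (added illustration, not part of this file): the slack
 * rule above in userspace numbers. For a 2 s timeout, 0.1% gives 2 ms of
 * slack; for a 500 s timeout, 0.1% would be 500 ms, so the 100 ms cap
 * applies. A minimal sketch, assuming only the 0.1%/cap rule described
 * in the comment above:
 */
#include <stdio.h>

#define NSEC_PER_SEC_L  1000000000L
#define NSEC_PER_MSEC_L 1000000L
#define MAX_SLACK_NS    (100 * NSEC_PER_MSEC_L)

static long estimate_slack_ns(long sec, long nsec)
{
        /* 0.1% of the timeout, capped at 100 ms */
        long slack = nsec / 1000 + sec * (NSEC_PER_SEC_L / 1000);
        return slack > MAX_SLACK_NS ? MAX_SLACK_NS : slack;
}

int main(void)
{
        printf("2s timeout   -> %ld ns slack\n", estimate_slack_ns(2, 0));
        printf("500s timeout -> %ld ns slack\n", estimate_slack_ns(500, 0));
        return 0;
}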


struct poll_table_page {
        struct poll_table_page * next;
        struct poll_table_entry * entry;
        struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
        ((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
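
/*
 * Illustration (added sketch, not part of this file): a typical driver
 * ->poll method that feeds the machinery below. poll_wait() ends up in
 * __pollwait(), which registers the caller on the driver's wait queue.
 * "struct mydev", its fields and data_ready() are hypothetical.
 */
static unsigned int mydev_poll(struct file *file, poll_table *wait)
{
        struct mydev *dev = file->private_data;  /* hypothetical */
        unsigned int mask = 0;

        poll_wait(file, &dev->read_wq, wait);    /* may call __pollwait() */
        if (data_ready(dev))                     /* hypothetical predicate */
                mask |= POLLIN | POLLRDNORM;
        return mask;
}
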
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
                       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
        init_poll_funcptr(&pwq->pt, __pollwait);
        pwq->polling_task = current;
        pwq->triggered = 0;
        pwq->error = 0;
        pwq->table = NULL;
        pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
        remove_wait_queue(entry->wait_address, &entry->wait);
        fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
        struct poll_table_page * p = pwq->table;
        int i;
        for (i = 0; i < pwq->inline_index; i++)
                free_poll_entry(pwq->inline_entries + i);
        while (p) {
                struct poll_table_entry * entry;
                struct poll_table_page *old;

                entry = p->entry;
                do {
                        entry--;
                        free_poll_entry(entry);
                } while (entry > p->entries);
                old = p;
                p = p->next;
                free_page((unsigned long) old);
        }
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
        struct poll_table_page *table = p->table;

        if (p->inline_index < N_INLINE_POLL_ENTRIES)
                return p->inline_entries + p->inline_index++;

        if (!table || POLL_TABLE_FULL(table)) {
                struct poll_table_page *new_table;

                new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
                if (!new_table) {
                        p->error = -ENOMEM;
                        return NULL;
                }
                new_table->entry = new_table->entries;
                new_table->next = table;
                p->table = new_table;
                table = new_table;
        }

        return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct poll_wqueues *pwq = wait->private;
        DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

        /*
         * Although this function is called under waitqueue lock, LOCK
         * doesn't imply write barrier and the users expect write
         * barrier semantics on wakeup functions.  The following
         * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
         * and is paired with smp_store_mb() in poll_schedule_timeout.
         */
        smp_wmb();
        pwq->triggered = 1;

        /*
         * Perform the default wake up operation using a dummy
         * waitqueue.
         *
         * TODO: This is hacky but there currently is no interface to
         * pass in @sync.  @sync is scheduled to be removed and once
         * that happens, wake_up_process() can be used directly.
         */
        return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct poll_table_entry *entry;

        entry = container_of(wait, struct poll_table_entry, wait);
        if (key && !((unsigned long)key & entry->key))
                return 0;
        return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
                                poll_table *p)
{
        struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
        struct poll_table_entry *entry = poll_get_entry(pwq);
        if (!entry)
                return;
        entry->filp = get_file(filp);
        entry->wait_address = wait_address;
        entry->key = p->_key;
        init_waitqueue_func_entry(&entry->wait, pollwake);
        entry->wait.private = pwq;
        add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
                          ktime_t *expires, unsigned long slack)
{
        int rc = -EINTR;

        set_current_state(state);
        if (!pwq->triggered)
                rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
        __set_current_state(TASK_RUNNING);

        /*
         * Prepare for the next iteration.
         *
         * The following smp_store_mb() serves two purposes.  First, it's
         * the counterpart rmb of the wmb in pollwake() such that data
         * written before wake up is always visible after wake up.
         * Second, the full barrier guarantees that triggered clearing
         * doesn't pass event check of the next iteration.  Note that
         * this problem doesn't exist for the first iteration as
         * add_wait_queue() has full barrier semantics.
         */
        smp_store_mb(pwq->triggered, 0);

        return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

/**
 * poll_select_set_timeout - helper function to set up the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note: we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
        struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

        if (!timespec64_valid(&ts))
                return -EINVAL;

        /* Optimize for the zero timeout value here */
        if (!sec && !nsec) {
                to->tv_sec = to->tv_nsec = 0;
        } else {
                ktime_get_ts64(to);
                *to = timespec64_add_safe(*to, ts);
        }
        return 0;
}
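
/*
 * Usage sketch (added illustration, not a call site in this file): turn
 * a caller-relative 1.5 s timeout into the absolute expiry that
 * do_select()/do_poll() expect. The enclosing function is hypothetical.
 */
static int example_set_timeout(struct timespec64 *end_time)
{
        if (poll_select_set_timeout(end_time, 1, 500 * NSEC_PER_MSEC))
                return -EINVAL; /* sec/nsec were not normalized */
        /* *end_time now holds "now + 1.5 s" on the monotonic clock */
        return 0;
}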

static int poll_select_copy_remaining(struct timespec64 *end_time,
                                      void __user *p,
                                      int timeval, int ret)
{
        struct timespec64 rts64;
        struct timespec rts;
        struct timeval rtv;

        if (!p)
                return ret;

        if (current->personality & STICKY_TIMEOUTS)
                goto sticky;

        /* No update for zero timeout */
        if (!end_time->tv_sec && !end_time->tv_nsec)
                return ret;

        ktime_get_ts64(&rts64);
        rts64 = timespec64_sub(*end_time, rts64);
        if (rts64.tv_sec < 0)
                rts64.tv_sec = rts64.tv_nsec = 0;

        rts = timespec64_to_timespec(rts64);

        if (timeval) {
                if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
                        memset(&rtv, 0, sizeof(rtv));
                rtv.tv_sec = rts64.tv_sec;
                rtv.tv_usec = rts64.tv_nsec / NSEC_PER_USEC;

                if (!copy_to_user(p, &rtv, sizeof(rtv)))
                        return ret;

        } else if (!copy_to_user(p, &rts, sizeof(rts)))
                return ret;

        /*
         * If an application puts its timeval in read-only memory, we
         * don't want the Linux-specific update to the timeval to
         * cause a fault after the select has completed
         * successfully. However, because we're not updating the
         * timeval, we can't restart the system call.
         */

sticky:
        if (ret == -ERESTARTNOHAND)
                ret = -EINTR;
        return ret;
}
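
/*
 * Userspace view (added illustration, not part of this file): on Linux,
 * select() rewrites its timeout with the time remaining, which is
 * exactly what the helper above implements. A minimal, runnable sketch:
 */
#include <stdio.h>
#include <sys/select.h>

int main(void)
{
        fd_set rfds;
        struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };

        FD_ZERO(&rfds);
        FD_SET(0, &rfds);       /* wait for stdin */

        select(1, &rfds, NULL, NULL, &tv);
        /* If stdin became readable early, tv now holds the remainder
         * (unless the process personality has STICKY_TIMEOUTS set). */
        printf("remaining: %ld.%06ld s\n", (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
}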

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
        unsigned long *open_fds;
        unsigned long set;
        int max;
        struct fdtable *fdt;

        /* handle the last incomplete long-word first */
        set = ~(~0UL << (n & (BITS_PER_LONG-1)));
        n /= BITS_PER_LONG;
        fdt = files_fdtable(current->files);
        open_fds = fdt->open_fds + n;
        max = 0;
        if (set) {
                set &= BITS(fds, n);
                if (set) {
                        if (!(set & ~*open_fds))
                                goto get_max;
                        return -EBADF;
                }
        }
        while (n) {
                open_fds--;
                n--;
                set = BITS(fds, n);
                if (!set)
                        continue;
                if (set & ~*open_fds)
                        return -EBADF;
                if (max)
                        continue;
get_max:
                do {
                        max++;
                        set >>= 1;
                } while (set);
                max += n * BITS_PER_LONG;
        }

        return max;
}
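
/*
 * Worked example (added note, not in the original): on a 64-bit arch,
 * for n = 70 the last long-word covers fds 64..69, so n & 63 == 6 and
 * ~(~0UL << 6) == 0x3f selects exactly those six trailing bits before
 * n is reduced to the number of whole words.
 */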

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
                                unsigned long out, unsigned long bit,
                                unsigned int ll_flag)
{
        wait->_key = POLLEX_SET | ll_flag;
        if (in & bit)
                wait->_key |= POLLIN_SET;
        if (out & bit)
                wait->_key |= POLLOUT_SET;
}

int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
        ktime_t expire, *to = NULL;
        struct poll_wqueues table;
        poll_table *wait;
        int retval, i, timed_out = 0;
        u64 slack = 0;
        unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
        unsigned long busy_end = 0;

        rcu_read_lock();
        retval = max_select_fd(n, fds);
        rcu_read_unlock();

        if (retval < 0)
                return retval;
        n = retval;

        poll_initwait(&table);
        wait = &table.pt;
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
                wait->_qproc = NULL;
                timed_out = 1;
        }

        if (end_time && !timed_out)
                slack = select_estimate_accuracy(end_time);

        retval = 0;
        for (;;) {
                unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
                bool can_busy_loop = false;

                inp = fds->in; outp = fds->out; exp = fds->ex;
                rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

                for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
                        unsigned long in, out, ex, all_bits, bit = 1, mask, j;
                        unsigned long res_in = 0, res_out = 0, res_ex = 0;

                        in = *inp++; out = *outp++; ex = *exp++;
                        all_bits = in | out | ex;
                        if (all_bits == 0) {
                                i += BITS_PER_LONG;
                                continue;
                        }

                        for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
                                struct fd f;
                                if (i >= n)
                                        break;
                                if (!(bit & all_bits))
                                        continue;
                                f = fdget(i);
                                if (f.file) {
                                        const struct file_operations *f_op;
                                        f_op = f.file->f_op;
                                        mask = DEFAULT_POLLMASK;
                                        if (f_op->poll) {
                                                wait_key_set(wait, in, out,
                                                             bit, busy_flag);
                                                mask = (*f_op->poll)(f.file, wait);
                                        }
                                        fdput(f);
                                        if ((mask & POLLIN_SET) && (in & bit)) {
                                                res_in |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        if ((mask & POLLOUT_SET) && (out & bit)) {
                                                res_out |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        if ((mask & POLLEX_SET) && (ex & bit)) {
                                                res_ex |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        /* got something, stop busy polling */
                                        if (retval) {
                                                can_busy_loop = false;
                                                busy_flag = 0;

                                        /*
                                         * only remember a returned
                                         * POLL_BUSY_LOOP if we asked for it
                                         */
                                        } else if (busy_flag & mask)
                                                can_busy_loop = true;

                                }
                        }
                        if (res_in)
                                *rinp = res_in;
                        if (res_out)
                                *routp = res_out;
                        if (res_ex)
                                *rexp = res_ex;
                        cond_resched();
                }
                wait->_qproc = NULL;
                if (retval || timed_out || signal_pending(current))
                        break;
                if (table.error) {
                        retval = table.error;
                        break;
                }

                /* only if found POLL_BUSY_LOOP sockets && not out of time */
                if (can_busy_loop && !need_resched()) {
                        if (!busy_end) {
                                busy_end = busy_loop_end_time();
                                continue;
                        }
                        if (!busy_loop_timeout(busy_end))
                                continue;
                }
                busy_flag = 0;

                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
                 * pointer to the expiry value.
                 */
                if (end_time && !to) {
                        expire = timespec64_to_ktime(*end_time);
                        to = &expire;
                }

                if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
                                           to, slack))
                        timed_out = 1;
        }

        poll_freewait(&table);

        return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
                           fd_set __user *exp, struct timespec64 *end_time)
{
        fd_set_bits fds;
        void *bits;
        int ret, max_fds;
        size_t size, alloc_size;
        struct fdtable *fdt;
        /* Allocate small arguments on the stack to save memory and be faster */
        long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

        ret = -EINVAL;
        if (n < 0)
                goto out_nofds;

        /* max_fds can increase, so grab it once to avoid race */
        rcu_read_lock();
        fdt = files_fdtable(current->files);
        max_fds = fdt->max_fds;
        rcu_read_unlock();
        if (n > max_fds)
                n = max_fds;

        /*
         * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
         * since we use fd_set, we need to allocate memory in units of
         * long-words.
         */
        size = FDS_BYTES(n);
        bits = stack_fds;
        if (size > sizeof(stack_fds) / 6) {
                /* Not enough space in on-stack array; must use kmalloc */
                ret = -ENOMEM;
                if (size > (SIZE_MAX / 6))
                        goto out_nofds;

                alloc_size = 6 * size;
                bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
                if (!bits && alloc_size > PAGE_SIZE)
                        bits = vmalloc(alloc_size);

                if (!bits)
                        goto out_nofds;
        }
        fds.in      = bits;
        fds.out     = bits +   size;
        fds.ex      = bits + 2*size;
        fds.res_in  = bits + 3*size;
        fds.res_out = bits + 4*size;
        fds.res_ex  = bits + 5*size;

        if ((ret = get_fd_set(n, inp, fds.in)) ||
            (ret = get_fd_set(n, outp, fds.out)) ||
            (ret = get_fd_set(n, exp, fds.ex)))
                goto out;
        zero_fd_set(n, fds.res_in);
        zero_fd_set(n, fds.res_out);
        zero_fd_set(n, fds.res_ex);

        ret = do_select(n, &fds, end_time);

        if (ret < 0)
                goto out;
        if (!ret) {
                ret = -ERESTARTNOHAND;
                if (signal_pending(current))
                        goto out;
                ret = 0;
        }

        if (set_fd_set(n, inp, fds.res_in) ||
            set_fd_set(n, outp, fds.res_out) ||
            set_fd_set(n, exp, fds.res_ex))
                ret = -EFAULT;

out:
        if (bits != stack_fds)
                kvfree(bits);
out_nofds:
        return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct timeval __user *, tvp)
{
        struct timespec64 end_time, *to = NULL;
        struct timeval tv;
        int ret;

        if (tvp) {
                if (copy_from_user(&tv, tvp, sizeof(tv)))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to,
                                tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
                                (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
                        return -EINVAL;
        }

        ret = core_sys_select(n, inp, outp, exp, to);
        ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

        return ret;
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
                       fd_set __user *exp, struct timespec __user *tsp,
                       const sigset_t __user *sigmask, size_t sigsetsize)
{
        sigset_t ksigmask, sigsaved;
        struct timespec ts;
        struct timespec64 ts64, end_time, *to = NULL;
        int ret;

        if (tsp) {
                if (copy_from_user(&ts, tsp, sizeof(ts)))
                        return -EFAULT;
                ts64 = timespec_to_timespec64(ts);

                to = &end_time;
                if (poll_select_set_timeout(to, ts64.tv_sec, ts64.tv_nsec))
                        return -EINVAL;
        }

        if (sigmask) {
                /* XXX: Don't preclude handling different sized sigset_t's.  */
                if (sigsetsize != sizeof(sigset_t))
                        return -EINVAL;
                if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
                        return -EFAULT;

                sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }

        ret = core_sys_select(n, inp, outp, exp, to);
        ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

        if (ret == -ERESTARTNOHAND) {
                /*
                 * Don't restore the signal mask yet. Let do_signal() deliver
                 * the signal on the way back to userspace, before the signal
                 * mask is restored.
                 */
                if (sigmask) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                                        sizeof(sigsaved));
                        set_restore_sigmask();
                }
        } else if (sigmask)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct timespec __user *, tsp,
                void __user *, sig)
{
        size_t sigsetsize = 0;
        sigset_t __user *up = NULL;

        if (sig) {
                if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
                    || __get_user(up, (sigset_t __user * __user *)sig)
                    || __get_user(sigsetsize,
                                (size_t __user *)(sig+sizeof(void *))))
                        return -EFAULT;
        }

        return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
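
/*
 * Userspace sketch (added illustration, not part of this file) of the
 * sixth-argument layout described above: a {pointer, size} pair passed
 * by reference when invoking pselect6 directly. The sigset size of 8
 * bytes matches the kernel's sigset_t on x86-64; treat both the struct
 * layout here and that constant as assumptions for illustration.
 */
#include <signal.h>
#include <sys/select.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int call_pselect6_raw(int nfds, fd_set *rfds, struct timespec *ts,
                      const sigset_t *mask)
{
        struct {
                const sigset_t *ss;     /* pointer to the mask */
                size_t ss_len;          /* kernel sigset size */
        } psig = { mask, 8 };

        return syscall(SYS_pselect6, nfds, rfds, NULL, NULL, ts, &psig);
}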

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
        unsigned long n;
        fd_set __user *inp, *outp, *exp;
        struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
        struct poll_list *next;
        int len;
        struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
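
/*
 * Worked example (added note, not in the original): with 4 KiB pages, a
 * 16-byte struct poll_list header and an 8-byte struct pollfd, each
 * chunk holds (4096 - 16) / 8 = 510 pollfds. Those sizes are typical
 * 64-bit values, given here only for illustration.
 */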

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
                                     bool *can_busy_poll,
                                     unsigned int busy_flag)
{
        unsigned int mask;
        int fd;

        mask = 0;
        fd = pollfd->fd;
        if (fd >= 0) {
                struct fd f = fdget(fd);
                mask = POLLNVAL;
                if (f.file) {
                        mask = DEFAULT_POLLMASK;
                        if (f.file->f_op->poll) {
                                pwait->_key = pollfd->events|POLLERR|POLLHUP;
                                pwait->_key |= busy_flag;
                                mask = f.file->f_op->poll(f.file, pwait);
                                if (mask & busy_flag)
                                        *can_busy_poll = true;
                        }
                        /* Mask out unneeded events. */
                        mask &= pollfd->events | POLLERR | POLLHUP;
                        fdput(f);
                }
        }
        pollfd->revents = mask;

        return mask;
}

static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
                   struct timespec64 *end_time)
{
        poll_table* pt = &wait->pt;
        ktime_t expire, *to = NULL;
        int timed_out = 0, count = 0;
        u64 slack = 0;
        unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
        unsigned long busy_end = 0;

        /* Optimise the no-wait case */
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
                pt->_qproc = NULL;
                timed_out = 1;
        }

        if (end_time && !timed_out)
                slack = select_estimate_accuracy(end_time);

        for (;;) {
                struct poll_list *walk;
                bool can_busy_loop = false;

                for (walk = list; walk != NULL; walk = walk->next) {
                        struct pollfd * pfd, * pfd_end;

                        pfd = walk->entries;
                        pfd_end = pfd + walk->len;
                        for (; pfd != pfd_end; pfd++) {
                                /*
                                 * Fish for events. If we found one, record it
                                 * and kill poll_table->_qproc, so we don't
                                 * needlessly register any other waiters after
                                 * this. They'll get immediately deregistered
                                 * when we break out and return.
                                 */
                                if (do_pollfd(pfd, pt, &can_busy_loop,
                                              busy_flag)) {
                                        count++;
                                        pt->_qproc = NULL;
                                        /* found something, stop busy polling */
                                        busy_flag = 0;
                                        can_busy_loop = false;
                                }
                        }
                }
                /*
                 * All waiters have already been registered, so don't provide
                 * a poll_table->_qproc to them on the next loop iteration.
                 */
                pt->_qproc = NULL;
                if (!count) {
                        count = wait->error;
                        if (signal_pending(current))
                                count = -EINTR;
                }
                if (count || timed_out)
                        break;

                /* only if found POLL_BUSY_LOOP sockets && not out of time */
                if (can_busy_loop && !need_resched()) {
                        if (!busy_end) {
                                busy_end = busy_loop_end_time();
                                continue;
                        }
                        if (!busy_loop_timeout(busy_end))
                                continue;
                }
                busy_flag = 0;

                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
                 * pointer to the expiry value.
                 */
                if (end_time && !to) {
                        expire = timespec64_to_ktime(*end_time);
                        to = &expire;
                }

                if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
                        timed_out = 1;
        }
        return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
                        sizeof(struct pollfd))

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
                struct timespec64 *end_time)
{
        struct poll_wqueues table;
        int err = -EFAULT, fdcount, len, size;
        /* Allocate small arguments on the stack to save memory and be
           faster - use long to make sure the buffer is aligned properly
           on 64 bit archs to avoid unaligned access */
        long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
        struct poll_list *const head = (struct poll_list *)stack_pps;
        struct poll_list *walk = head;
        unsigned long todo = nfds;

        if (nfds > rlimit(RLIMIT_NOFILE))
                return -EINVAL;

        len = min_t(unsigned int, nfds, N_STACK_PPS);
        for (;;) {
                walk->next = NULL;
                walk->len = len;
                if (!len)
                        break;

                if (copy_from_user(walk->entries, ufds + nfds-todo,
                                        sizeof(struct pollfd) * walk->len))
                        goto out_fds;

                todo -= walk->len;
                if (!todo)
                        break;

                len = min(todo, POLLFD_PER_PAGE);
                size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
                walk = walk->next = kmalloc(size, GFP_KERNEL);
                if (!walk) {
                        err = -ENOMEM;
                        goto out_fds;
                }
        }

        poll_initwait(&table);
        fdcount = do_poll(head, &table, end_time);
        poll_freewait(&table);

        for (walk = head; walk; walk = walk->next) {
                struct pollfd *fds = walk->entries;
                int j;

                for (j = 0; j < walk->len; j++, ufds++)
                        if (__put_user(fds[j].revents, &ufds->revents))
                                goto out_fds;
        }

        err = fdcount;
out_fds:
        walk = head->next;
        while (walk) {
                struct poll_list *pos = walk;
                walk = walk->next;
                kfree(pos);
        }

        return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
        struct pollfd __user *ufds = restart_block->poll.ufds;
        int nfds = restart_block->poll.nfds;
        struct timespec64 *to = NULL, end_time;
        int ret;

        if (restart_block->poll.has_timeout) {
                end_time.tv_sec = restart_block->poll.tv_sec;
                end_time.tv_nsec = restart_block->poll.tv_nsec;
                to = &end_time;
        }

        ret = do_sys_poll(ufds, nfds, to);

        if (ret == -EINTR) {
                restart_block->fn = do_restart_poll;
                ret = -ERESTART_RESTARTBLOCK;
        }
        return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
                int, timeout_msecs)
{
        struct timespec64 end_time, *to = NULL;
        int ret;

        if (timeout_msecs >= 0) {
                to = &end_time;
                poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
                        NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
        }

        ret = do_sys_poll(ufds, nfds, to);

        if (ret == -EINTR) {
                struct restart_block *restart_block;

                restart_block = &current->restart_block;
                restart_block->fn = do_restart_poll;
                restart_block->poll.ufds = ufds;
                restart_block->poll.nfds = nfds;

                if (timeout_msecs >= 0) {
                        restart_block->poll.tv_sec = end_time.tv_sec;
                        restart_block->poll.tv_nsec = end_time.tv_nsec;
                        restart_block->poll.has_timeout = 1;
                } else
                        restart_block->poll.has_timeout = 0;

                ret = -ERESTART_RESTARTBLOCK;
        }
        return ret;
}
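
/*
 * Userspace counterpart (added illustration, not part of this file): the
 * poll() syscall implemented above, driven from a minimal, runnable
 * program that waits up to one second for stdin to become readable.
 */
#include <poll.h>
#include <stdio.h>

int main(void)
{
        struct pollfd pfd = { .fd = 0, .events = POLLIN };
        int n = poll(&pfd, 1, 1000);    /* 1000 ms timeout */

        if (n < 0)
                perror("poll");
        else if (n == 0)
                printf("timed out\n");
        else if (pfd.revents & POLLIN)
                printf("stdin is readable\n");
        return 0;
}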

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
                struct timespec __user *, tsp, const sigset_t __user *, sigmask,
                size_t, sigsetsize)
{
        sigset_t ksigmask, sigsaved;
        struct timespec ts;
        struct timespec64 end_time, *to = NULL;
        int ret;

        if (tsp) {
                if (copy_from_user(&ts, tsp, sizeof(ts)))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        if (sigmask) {
                /* XXX: Don't preclude handling different sized sigset_t's.  */
                if (sigsetsize != sizeof(sigset_t))
                        return -EINVAL;
                if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
                        return -EFAULT;

                sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }

        ret = do_sys_poll(ufds, nfds, to);

        /* We can restart this syscall, usually */
        if (ret == -EINTR) {
                /*
                 * Don't restore the signal mask yet. Let do_signal() deliver
                 * the signal on the way back to userspace, before the signal
                 * mask is restored.
                 */
                if (sigmask) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                                        sizeof(sigsaved));
                        set_restore_sigmask();
                }
                ret = -ERESTARTNOHAND;
        } else if (sigmask)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

        return ret;
}
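
/*
 * Usage sketch (added illustration, not part of this file): the point of
 * the sigmask handling above is that ppoll() swaps the signal mask and
 * sleeps atomically, closing the wakeup race that a separate
 * sigprocmask() + poll() pair would leave open. A hedged userspace
 * fragment, assuming SIGINT is normally blocked and a handler installed:
 */
#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>

int wait_allowing_sigint(struct pollfd *pfds, nfds_t nfds)
{
        sigset_t during;

        sigemptyset(&during);   /* unblock everything while sleeping */
        /* SIGINT can only be delivered inside this call; on return the
         * original mask is back in place (cf. saved_sigmask above). */
        return ppoll(pfds, nfds, NULL, &during);
}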