v4.10.11
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timespec64.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}
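
The slack rule above is worth seeing with numbers: 0.1% of the remaining timeout (0.5% for positively niced tasks), capped at MAX_SLACK, and never less than the task's timer_slack_ns. A minimal userspace sketch of the same arithmetic, with the kernel constants copied in as assumptions:

#include <stdio.h>

#define NSEC_PER_SEC	1000000000L
#define NSEC_PER_MSEC	1000000L
#define MAX_SLACK	(100 * NSEC_PER_MSEC)

/* Same computation as __estimate_accuracy() for a relative timeout. */
static long estimate_slack(long sec, long nsec, int niced)
{
	int divfactor = niced ? 200 : 1000;	/* 0.5% vs 0.1% */
	long slack;

	if (sec < 0)
		return 0;
	if (sec > MAX_SLACK / (NSEC_PER_SEC / divfactor))
		return MAX_SLACK;
	slack = nsec / divfactor + sec * (NSEC_PER_SEC / divfactor);
	return slack > MAX_SLACK ? MAX_SLACK : slack;
}

int main(void)
{
	printf("%ld\n", estimate_slack(1, 0, 0));	/* 1 s   -> 1000000 ns (1 ms) */
	printf("%ld\n", estimate_slack(500, 0, 0));	/* 500 s -> capped at 100 ms */
	return 0;
}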



struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);
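
For context on what __pollwait() gets plugged into: every ->poll implementation is expected to call poll_wait() on the wait queue it will later wake. A sketch of a hypothetical character-driver poll method (my_wq, my_data_ready and my_poll are made-up names, not from this file):

#include <linux/poll.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* woken from the driver's IRQ path */
static bool my_data_ready;		/* set elsewhere in the driver */

static unsigned int my_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = 0;

	/* Registers the caller on my_wq via p->_qproc, i.e. __pollwait() */
	poll_wait(file, &my_wq, wait);

	if (my_data_ready)
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

do_select() and do_poll() below then sleep in poll_schedule_timeout() until one of the registered queues is woken.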

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}
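
In other words, the caller hands in a relative (sec, nsec) pair and gets back an absolute CLOCK_MONOTONIC expiry in *to. The userspace analogue of that conversion, ignoring the zero-timeout fast path (and the saturation that timespec64_add_safe() adds on overflow), would look roughly like this:

#include <time.h>

#define NSEC_PER_SEC	1000000000L

/* Relative (sec, nsec) -> absolute monotonic expiry, like the helper above. */
static int set_timeout(struct timespec *to, time_t sec, long nsec)
{
	if (sec < 0 || nsec < 0 || nsec >= NSEC_PER_SEC)
		return -1;			/* not normalized */
	clock_gettime(CLOCK_MONOTONIC, to);
	to->tv_sec += sec;
	to->tv_nsec += nsec;
	if (to->tv_nsec >= NSEC_PER_SEC) {
		to->tv_sec++;
		to->tv_nsec -= NSEC_PER_SEC;
	}
	return 0;
}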

static int poll_select_copy_remaining(struct timespec64 *end_time,
				      void __user *p,
				      int timeval, int ret)
{
	struct timespec64 rts64;
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts64);
	rts64 = timespec64_sub(*end_time, rts64);
	if (rts64.tv_sec < 0)
		rts64.tv_sec = rts64.tv_nsec = 0;

	rts = timespec64_to_timespec(rts64);

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts64.tv_sec;
		rtv.tv_usec = rts64.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}
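
The STICKY_TIMEOUTS branch is observable from userspace: by default Linux writes the time remaining back into the timeval, but with the personality flag set the value is left alone. A quick Linux-specific demonstration:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/personality.h>
#include <sys/select.h>

int main(void)
{
	struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };

	select(0, NULL, NULL, NULL, &tv);	/* sleeps ~1 s */
	printf("default: %ld.%06ld left\n", (long)tv.tv_sec, (long)tv.tv_usec);

	personality(personality(0xffffffff) | STICKY_TIMEOUTS);
	tv = (struct timeval){ .tv_sec = 1, .tv_usec = 0 };
	select(0, NULL, NULL, NULL, &tv);	/* tv stays 1.000000 now */
	printf("sticky:  %ld.%06ld left\n", (long)tv.tv_sec, (long)tv.tv_usec);
	return 0;
}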

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}
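
max_select_fd() walks the combined in/out/ex bitmap from the top word down, returns -EBADF if any requested bit names a descriptor that is not open, and otherwise returns one past the highest requested bit. The bit-scanning part in isolation, minus the open-descriptor check, as a userspace sketch:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)

/* Return one past the highest set bit across nwords bitmap words, or 0. */
static int max_set_bit(const unsigned long *words, int nwords)
{
	int n;

	for (n = nwords; n-- > 0; ) {
		unsigned long set = words[n];
		int max = 0;

		if (!set)
			continue;
		while (set) {			/* the get_max loop above */
			max++;
			set >>= 1;
		}
		return max + n * BITS_PER_LONG;
	}
	return 0;
}

int main(void)
{
	unsigned long bm[2] = { 0x9UL, 0x4UL };	/* bits 0, 3 and 66 set */
	printf("%d\n", max_set_bit(bm, 2));	/* prints 67 on a 64-bit build */
	return 0;
}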

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				unsigned int ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op->poll) {
						wait_key_set(wait, in, out,
							     bit, busy_flag);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval) {
						can_busy_loop = false;
						busy_flag = 0;

					/*
					 * only remember a returned
					 * POLL_BUSY_LOOP if we asked for it
					 */
					} else if (busy_flag & mask)
						can_busy_loop = true;

				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want it to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	size_t size, alloc_size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		if (size > (SIZE_MAX / 6))
			goto out_nofds;

		alloc_size = 6 * size;
		bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
		if (!bits && alloc_size > PAGE_SIZE)
			bits = vmalloc(alloc_size);

		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kvfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}
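
Seen from userspace, the whole path above reduces to the familiar call: build the fd_sets, pass the largest fd plus one, and read both the surviving bits and the (Linux-specific) updated timeout afterwards. A self-contained example against a pipe:

#include <stdio.h>
#include <unistd.h>
#include <sys/select.h>

int main(void)
{
	int pfd[2];
	fd_set rfds;
	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };

	if (pipe(pfd) < 0)
		return 1;
	write(pfd[1], "x", 1);			/* make the read end ready */

	FD_ZERO(&rfds);
	FD_SET(pfd[0], &rfds);
	int n = select(pfd[0] + 1, &rfds, NULL, NULL, &tv);
	printf("ready: %d, readable: %d, %ld.%06lds left\n",
	       n, FD_ISSET(pfd[0], &rfds), (long)tv.tv_sec, (long)tv.tv_usec);
	return 0;
}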

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 ts64, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;
		ts64 = timespec_to_timespec64(ts);

		to = &end_time;
		if (poll_select_set_timeout(to, ts64.tv_sec, ts64.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
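
For a feel for the chunk size: on a typical 64-bit build with 4 KiB pages, sizeof(struct poll_list) is 16 (8-byte next pointer, 4-byte len, 4 bytes of padding) and sizeof(struct pollfd) is 8, so POLLFD_PER_PAGE works out to (4096 - 16) / 8 = 510 entries per allocated chunk. Those figures describe the common case, not anything the code guarantees.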

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_busy_poll,
				     unsigned int busy_flag)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				pwait->_key |= busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}

static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec64 *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec64 *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
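
The same wait expressed through sys_poll(): one pollfd per descriptor, a millisecond timeout, and revents filled in by do_pollfd(). A minimal example:

#include <stdio.h>
#include <unistd.h>
#include <poll.h>

int main(void)
{
	int pfd[2];

	if (pipe(pfd) < 0)
		return 1;
	write(pfd[1], "x", 1);			/* make the read end ready */

	struct pollfd p = { .fd = pfd[0], .events = POLLIN };
	int n = poll(&p, 1, 5000);		/* timeout in milliseconds */
	printf("ready: %d, revents: %#x\n", n, (unsigned)p.revents);
	return 0;
}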

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
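
ppoll() exists so that swapping the signal mask and starting the wait happen atomically; the saved_sigmask handling above is what lets an interrupting signal be delivered under the caller's original mask. Typical use, deferring SIGINT for the duration of the wait (note that Linux may update the timespec, as with select):

#define _GNU_SOURCE
#include <stdio.h>
#include <signal.h>
#include <poll.h>

int main(void)
{
	sigset_t mask;
	struct pollfd p = { .fd = 0, .events = POLLIN };	/* stdin */
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);		/* defer Ctrl-C while waiting */

	int n = ppoll(&p, 1, &ts, &mask);
	printf("ppoll returned %d\n", n);
	return 0;
}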
v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timespec64.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}



struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !(key_to_poll(key) & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

static int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}

enum poll_time_type {
	PT_TIMEVAL = 0,
	PT_OLD_TIMEVAL = 1,
	PT_TIMESPEC = 2,
	PT_OLD_TIMESPEC = 3,
};

static int poll_select_finish(struct timespec64 *end_time,
			      void __user *p,
			      enum poll_time_type pt_type, int ret)
{
	struct timespec64 rts;

	restore_saved_sigmask_unless(ret == -ERESTARTNOHAND);

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts);
	rts = timespec64_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;


	switch (pt_type) {
	case PT_TIMEVAL:
		{
			struct __kernel_old_timeval rtv;

			if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
				memset(&rtv, 0, sizeof(rtv));
			rtv.tv_sec = rts.tv_sec;
			rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
			if (!copy_to_user(p, &rtv, sizeof(rtv)))
				return ret;
		}
		break;
	case PT_OLD_TIMEVAL:
		{
			struct old_timeval32 rtv;

			rtv.tv_sec = rts.tv_sec;
			rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
			if (!copy_to_user(p, &rtv, sizeof(rtv)))
				return ret;
		}
		break;
	case PT_TIMESPEC:
		if (!put_timespec64(&rts, p))
			return ret;
		break;
	case PT_OLD_TIMESPEC:
		if (!put_old_timespec32(&rts, p))
			return ret;
		break;
	default:
		BUG();
	}
	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG	(8*sizeof(long))
#define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))

/*
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR |\
			EPOLLNVAL)
#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR |\
			 EPOLLNVAL)
#define POLLEX_SET (EPOLLPRI | EPOLLNVAL)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				__poll_t ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			__poll_t mask;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				mask = EPOLLNVAL;
				f = fdget(i);
				if (f.file) {
					wait_key_set(wait, in, out, bit,
						     busy_flag);
					mask = vfs_poll(f.file, wait);

					fdput(f);
				}
				if ((mask & POLLIN_SET) && (in & bit)) {
					res_in |= bit;
					retval++;
					wait->_qproc = NULL;
				}
				if ((mask & POLLOUT_SET) && (out & bit)) {
					res_out |= bit;
					retval++;
					wait->_qproc = NULL;
				}
				if ((mask & POLLEX_SET) && (ex & bit)) {
					res_ex |= bit;
					retval++;
					wait->_qproc = NULL;
				}
				/* got something, stop busy polling */
				if (retval) {
					can_busy_loop = false;
					busy_flag = 0;

				/*
				 * only remember a returned
				 * POLL_BUSY_LOOP if we asked for it
				 */
				} else if (busy_flag & mask)
					can_busy_loop = true;

			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want it to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	size_t size, alloc_size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		if (size > (SIZE_MAX / 6))
			goto out_nofds;

		alloc_size = 6 * size;
		bits = kvmalloc(alloc_size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kvfree(bits);
out_nofds:
	return ret;
}

static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct __kernel_old_timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	return poll_select_finish(&end_time, tvp, PT_TIMEVAL, ret);
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct __kernel_old_timeval __user *, tvp)
{
	return kern_select(n, inp, outp, exp, tvp);
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, void __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize,
		       enum poll_time_type type)
{
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		switch (type) {
		case PT_TIMESPEC:
			if (get_timespec64(&ts, tsp))
				return -EFAULT;
			break;
		case PT_OLD_TIMESPEC:
			if (get_old_timespec32(&ts, tsp))
				return -EFAULT;
			break;
		default:
			BUG();
		}

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	ret = set_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = core_sys_select(n, inp, outp, exp, to);
	return poll_select_finish(&end_time, tsp, type, ret);
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
struct sigset_argpack {
	sigset_t __user *p;
	size_t size;
};

static inline int get_sigset_argpack(struct sigset_argpack *to,
				     struct sigset_argpack __user *from)
{
	// the path is hot enough for overhead of copy_from_user() to matter
	if (from) {
		if (!user_read_access_begin(from, sizeof(*from)))
			return -EFAULT;
		unsafe_get_user(to->p, &from->p, Efault);
		unsafe_get_user(to->size, &from->size, Efault);
		user_read_access_end();
	}
	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}
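
The argpack layout is visible if glibc is bypassed and pselect6 is invoked directly: the sixth argument points to {sigset pointer, sigset size}, and the size must match the kernel's sigset_t, which is 8 bytes on the common 64-bit ABIs (an assumption in this sketch, as is the use of the raw syscall at all):

#define _GNU_SOURCE
#include <stdio.h>
#include <signal.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/select.h>

struct sigset_argpack {			/* mirrors the kernel struct above */
	const sigset_t *p;
	size_t size;
};

int main(void)
{
	sigset_t mask;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);

	struct sigset_argpack pack = { &mask, 8 };	/* kernel sigset size */
	long n = syscall(SYS_pselect6, 0, NULL, NULL, NULL, &ts, &pack);
	printf("pselect6 returned %ld\n", n);
	return 0;
}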
 794
 795SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
 796		fd_set __user *, exp, struct __kernel_timespec __user *, tsp,
 797		void __user *, sig)
 798{
 799	struct sigset_argpack x = {NULL, 0};
 
 800
 801	if (get_sigset_argpack(&x, sig))
 802		return -EFAULT;
 
 
 
 
 
 803
 804	return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_TIMESPEC);
 805}
 806
 807#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
 808
 809SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *, outp,
 810		fd_set __user *, exp, struct old_timespec32 __user *, tsp,
 811		void __user *, sig)
 812{
 813	struct sigset_argpack x = {NULL, 0};
 814
 815	if (get_sigset_argpack(&x, sig))
 816		return -EFAULT;
 817
 818	return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_OLD_TIMESPEC);
 819}
 820
 821#endif
 822
 823#ifdef __ARCH_WANT_SYS_OLD_SELECT
 824struct sel_arg_struct {
 825	unsigned long n;
 826	fd_set __user *inp, *outp, *exp;
 827	struct __kernel_old_timeval __user *tvp;
 828};
 829
 830SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
 831{
 832	struct sel_arg_struct a;
 833
 834	if (copy_from_user(&a, arg, sizeof(a)))
 835		return -EFAULT;
 836	return kern_select(a.n, a.inp, a.outp, a.exp, a.tvp);
 837}
 838#endif
 839
 840struct poll_list {
 841	struct poll_list *next;
 842	int len;
 843	struct pollfd entries[];
 844};
 845
 846#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
 847
 848/*
 849 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 850 * interested in events matching the pollfd->events mask, and the result
 851 * matching that mask is both recorded in pollfd->revents and returned. The
 852 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 853 * if pwait->_qproc is non-NULL.
 854 */
 855static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
 856				     bool *can_busy_poll,
 857				     __poll_t busy_flag)
 858{
 859	int fd = pollfd->fd;
 860	__poll_t mask = 0, filter;
 861	struct fd f;
 862
 863	if (fd < 0)
 864		goto out;
 865	mask = EPOLLNVAL;
 866	f = fdget(fd);
 867	if (!f.file)
 868		goto out;
 869
 870	/* userland u16 ->events contains POLL... bitmap */
 871	filter = demangle_poll(pollfd->events) | EPOLLERR | EPOLLHUP;
 872	pwait->_key = filter | busy_flag;
 873	mask = vfs_poll(f.file, pwait);
 874	if (mask & busy_flag)
 875		*can_busy_poll = true;
 876	mask &= filter;		/* Mask out unneeded events. */
 877	fdput(f);
 
 
 
 
 
 878
 879out:
 880	/* ... and so does ->revents */
 881	pollfd->revents = mangle_poll(mask);
 882	return mask;
 883}
 884
 885static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
 886		   struct timespec64 *end_time)
 887{
 888	poll_table* pt = &wait->pt;
 889	ktime_t expire, *to = NULL;
 890	int timed_out = 0, count = 0;
 891	u64 slack = 0;
 892	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
 893	unsigned long busy_start = 0;
 894
 895	/* Optimise the no-wait case */
 896	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
 897		pt->_qproc = NULL;
 898		timed_out = 1;
 899	}
 900
 901	if (end_time && !timed_out)
 902		slack = select_estimate_accuracy(end_time);
 903
 904	for (;;) {
 905		struct poll_list *walk;
 906		bool can_busy_loop = false;
 907
 908		for (walk = list; walk != NULL; walk = walk->next) {
 909			struct pollfd * pfd, * pfd_end;
 910
 911			pfd = walk->entries;
 912			pfd_end = pfd + walk->len;
 913			for (; pfd != pfd_end; pfd++) {
 914				/*
 915				 * Fish for events. If we found one, record it
 916				 * and kill poll_table->_qproc, so we don't
 917				 * needlessly register any other waiters after
 918				 * this. They'll get immediately deregistered
 919				 * when we break out and return.
 920				 */
 921				if (do_pollfd(pfd, pt, &can_busy_loop,
 922					      busy_flag)) {
 923					count++;
 924					pt->_qproc = NULL;
 925					/* found something, stop busy polling */
 926					busy_flag = 0;
 927					can_busy_loop = false;
 928				}
 929			}
 930		}
 931		/*
 932		 * All waiters have already been registered, so don't provide
 933		 * a poll_table->_qproc to them on the next loop iteration.
 934		 */
 935		pt->_qproc = NULL;
 936		if (!count) {
 937			count = wait->error;
 938			if (signal_pending(current))
 939				count = -ERESTARTNOHAND;
 940		}
 941		if (count || timed_out)
 942			break;
 943
 944		/* busy-loop only if POLL_BUSY_LOOP sockets were found && we're not out of time */
 945		if (can_busy_loop && !need_resched()) {
 946			if (!busy_start) {
 947				busy_start = busy_loop_current_time();
 948				continue;
 949			}
 950			if (!busy_loop_timeout(busy_start))
 951				continue;
 952		}
 953		busy_flag = 0;
 954
 955		/*
 956		 * If this is the first loop iteration and a timeout was
 957		 * given, convert it to ktime_t and point 'to' at the
 958		 * expiry value.
 959		 */
 960		if (end_time && !to) {
 961			expire = timespec64_to_ktime(*end_time);
 962			to = &expire;
 963		}
 964
 965		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
 966			timed_out = 1;
 967	}
 968	return count;
 969}
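/*
 * Editor's note: net_busy_loop_on() in do_poll() above is driven by a
 * sysctl; a sketch of turning it on, with the caveat that useful values
 * are workload-dependent:
 *
 *	# let poll()/select() busy-poll sockets for up to 50 usec
 *	sysctl net.core.busy_poll=50
 */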
 970
 971#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
 972			sizeof(struct pollfd))
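/*
 * Editor's note: with POLL_STACK_ALLOC == 256 (from <linux/poll.h>)
 * and the 64-bit sizes worked above, the on-stack chunk covers
 *
 *	N_STACK_PPS = (256 - 16) / 8 = 30
 *
 * pollfds before do_sys_poll() starts kmalloc'ing extra chunks.
 */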
 973
 974static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
 975		struct timespec64 *end_time)
 976{
 977	struct poll_wqueues table;
 978	int err = -EFAULT, fdcount, len;
 979	/* Allocate small argument arrays on the stack to save memory and
 980	   be faster; use long to make sure the buffer is properly aligned
 981	   on 64-bit archs and to avoid unaligned accesses */
 982	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
 983	struct poll_list *const head = (struct poll_list *)stack_pps;
 984	struct poll_list *walk = head;
 985	unsigned long todo = nfds;
 986
 987	if (nfds > rlimit(RLIMIT_NOFILE))
 988		return -EINVAL;
 989
 990	len = min_t(unsigned int, nfds, N_STACK_PPS);
 991	for (;;) {
 992		walk->next = NULL;
 993		walk->len = len;
 994		if (!len)
 995			break;
 996
 997		if (copy_from_user(walk->entries, ufds + nfds-todo,
 998					sizeof(struct pollfd) * walk->len))
 999			goto out_fds;
1000
1001		todo -= walk->len;
1002		if (!todo)
1003			break;
1004
1005		len = min(todo, POLLFD_PER_PAGE);
1006		walk = walk->next = kmalloc(struct_size(walk, entries, len),
1007					    GFP_KERNEL);
1008		if (!walk) {
1009			err = -ENOMEM;
1010			goto out_fds;
1011		}
1012	}
1013
1014	poll_initwait(&table);
1015	fdcount = do_poll(head, &table, end_time);
1016	poll_freewait(&table);
1017
1018	if (!user_write_access_begin(ufds, nfds * sizeof(*ufds)))
1019		goto out_fds;
1020
1021	for (walk = head; walk; walk = walk->next) {
1022		struct pollfd *fds = walk->entries;
1023		int j;
1024
1025		for (j = walk->len; j; fds++, ufds++, j--)
1026			unsafe_put_user(fds->revents, &ufds->revents, Efault);
1027	}
1028	user_write_access_end();
1029
1030	err = fdcount;
1031out_fds:
1032	walk = head->next;
1033	while (walk) {
1034		struct poll_list *pos = walk;
1035		walk = walk->next;
1036		kfree(pos);
1037	}
1038
1039	return err;
1040
1041Efault:
1042	user_write_access_end();
1043	err = -EFAULT;
1044	goto out_fds;
1045}
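/*
 * Editor's note: the chunking in do_sys_poll() worked for nfds == 1000
 * on a 64-bit, 4 KiB-page machine: 30 entries land in the stack chunk
 * and the rest in two kmalloc'ed chunks,
 *
 *	head(30) -> chunk(510) -> chunk(460)
 *
 * On the way out only ->revents is copied back, inside a single
 * user_write_access_begin() section; ->fd and ->events are input the
 * caller already owns.
 */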
1046
1047static long do_restart_poll(struct restart_block *restart_block)
1048{
1049	struct pollfd __user *ufds = restart_block->poll.ufds;
1050	int nfds = restart_block->poll.nfds;
1051	struct timespec64 *to = NULL, end_time;
1052	int ret;
1053
1054	if (restart_block->poll.has_timeout) {
1055		end_time.tv_sec = restart_block->poll.tv_sec;
1056		end_time.tv_nsec = restart_block->poll.tv_nsec;
1057		to = &end_time;
1058	}
1059
1060	ret = do_sys_poll(ufds, nfds, to);
1061
1062	if (ret == -ERESTARTNOHAND)
1063		ret = set_restart_fn(restart_block, do_restart_poll);
1064
1065	return ret;
1066}
1067
1068SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
1069		int, timeout_msecs)
1070{
1071	struct timespec64 end_time, *to = NULL;
1072	int ret;
1073
1074	if (timeout_msecs >= 0) {
1075		to = &end_time;
1076		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
1077			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
1078	}
1079
1080	ret = do_sys_poll(ufds, nfds, to);
1081
1082	if (ret == -ERESTARTNOHAND) {
1083		struct restart_block *restart_block;
1084
1085		restart_block = &current->restart_block;
1086		restart_block->poll.ufds = ufds;
1087		restart_block->poll.nfds = nfds;
1088
1089		if (timeout_msecs >= 0) {
1090			restart_block->poll.tv_sec = end_time.tv_sec;
1091			restart_block->poll.tv_nsec = end_time.tv_nsec;
1092			restart_block->poll.has_timeout = 1;
1093		} else
1094			restart_block->poll.has_timeout = 0;
1095
1096		ret = set_restart_fn(restart_block, do_restart_poll);
1097	}
1098	return ret;
1099}
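/*
 * Editor's note: the msec split above in action: timeout_msecs == 2500
 * becomes poll_select_set_timeout(to, 2, 500 * NSEC_PER_MSEC).  A
 * minimal userspace caller, with sock_fd and the handler as
 * placeholders:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, 2500) > 0 && (pfd.revents & POLLIN))
 *		handle_readable(sock_fd);	// hypothetical helper
 */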
1100
1101SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
1102		struct __kernel_timespec __user *, tsp, const sigset_t __user *, sigmask,
1103		size_t, sigsetsize)
1104{
1105	struct timespec64 ts, end_time, *to = NULL;
1106	int ret;
1107
1108	if (tsp) {
1109		if (get_timespec64(&ts, tsp))
1110			return -EFAULT;
1111
1112		to = &end_time;
1113		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1114			return -EINVAL;
1115	}
1116
1117	ret = set_user_sigmask(sigmask, sigsetsize);
1118	if (ret)
1119		return ret;
1120
1121	ret = do_sys_poll(ufds, nfds, to);
1122	return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
1123}
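/*
 * Editor's note: a sketch of the race ppoll() exists to close.  The
 * kernel swaps the sigmask in and out atomically around the wait, so a
 * signal unblocked only for the poll cannot slip in between unblocking
 * and sleeping.  Needs <errno.h>, <poll.h>, <signal.h>; sock_fd and the
 * handler are placeholders:
 *
 *	sigset_t block, orig;
 *	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN };
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGINT);
 *	sigprocmask(SIG_BLOCK, &block, &orig);
 *	// from here, SIGINT can be delivered only while ppoll() waits
 *	if (ppoll(&pfd, 1, NULL, &orig) < 0 && errno == EINTR)
 *		handle_sigint();		// hypothetical handler
 */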
1124
1125#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
1126
1127SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds,
1128		struct old_timespec32 __user *, tsp, const sigset_t __user *, sigmask,
1129		size_t, sigsetsize)
1130{
1131	struct timespec64 ts, end_time, *to = NULL;
1132	int ret;
1133
1134	if (tsp) {
1135		if (get_old_timespec32(&ts, tsp))
1136			return -EFAULT;
1137
1138		to = &end_time;
1139		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1140			return -EINVAL;
1141	}
1142
1143	ret = set_user_sigmask(sigmask, sigsetsize);
1144	if (ret)
1145		return ret;
1146
1147	ret = do_sys_poll(ufds, nfds, to);
1148	return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
1149}
1150#endif
1151
1152#ifdef CONFIG_COMPAT
1153#define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))
1154
1155/*
1156 * Ooo, nasty.  Here we need to frob 32-bit unsigned longs into
1157 * 64-bit unsigned longs.
1158 */
1159static
1160int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
1161			unsigned long *fdset)
1162{
1163	if (ufdset) {
1164		return compat_get_bitmap(fdset, ufdset, nr);
1165	} else {
1166		zero_fd_set(nr, fdset);
1167		return 0;
1168	}
1169}
1170
1171static
1172int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
1173		      unsigned long *fdset)
1174{
1175	if (!ufdset)
1176		return 0;
1177	return compat_put_bitmap(ufdset, fdset, nr);
1178}
1179
1180
1181/*
1182 * This is a near copy of sys_select from fs/select.c and should
1183 * probably be compared to it from time to time.
1184 */
1185
1186/*
1187 * We can actually return ERESTARTSYS instead of EINTR, but I'd
1188 * like to be certain this leads to no problems. So I return
1189 * EINTR just for safety.
1190 *
1191 * Update: ERESTARTSYS breaks at least the xview clock binary, so
1192 * I'm trying ERESTARTNOHAND, which restarts only when you want it to.
1193 */
1194static int compat_core_sys_select(int n, compat_ulong_t __user *inp,
1195	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1196	struct timespec64 *end_time)
1197{
1198	fd_set_bits fds;
1199	void *bits;
1200	int size, max_fds, ret = -EINVAL;
1201	struct fdtable *fdt;
1202	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
1203
1204	if (n < 0)
1205		goto out_nofds;
1206
1207	/* max_fds can increase, so grab it once to avoid race */
1208	rcu_read_lock();
1209	fdt = files_fdtable(current->files);
1210	max_fds = fdt->max_fds;
1211	rcu_read_unlock();
1212	if (n > max_fds)
1213		n = max_fds;
1214
1215	/*
1216	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
1217	 * since we use fdsets, we need to allocate memory in units of
1218	 * long-words.
1219	 */
1220	size = FDS_BYTES(n);
1221	bits = stack_fds;
1222	if (size > sizeof(stack_fds) / 6) {
1223		bits = kmalloc_array(6, size, GFP_KERNEL);
1224		ret = -ENOMEM;
1225		if (!bits)
1226			goto out_nofds;
1227	}
1228	fds.in      = (unsigned long *)  bits;
1229	fds.out     = (unsigned long *) (bits +   size);
1230	fds.ex      = (unsigned long *) (bits + 2*size);
1231	fds.res_in  = (unsigned long *) (bits + 3*size);
1232	fds.res_out = (unsigned long *) (bits + 4*size);
1233	fds.res_ex  = (unsigned long *) (bits + 5*size);
1234
1235	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
1236	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
1237	    (ret = compat_get_fd_set(n, exp, fds.ex)))
1238		goto out;
1239	zero_fd_set(n, fds.res_in);
1240	zero_fd_set(n, fds.res_out);
1241	zero_fd_set(n, fds.res_ex);
1242
1243	ret = do_select(n, &fds, end_time);
1244
1245	if (ret < 0)
1246		goto out;
1247	if (!ret) {
1248		ret = -ERESTARTNOHAND;
1249		if (signal_pending(current))
1250			goto out;
1251		ret = 0;
1252	}
1253
1254	if (compat_set_fd_set(n, inp, fds.res_in) ||
1255	    compat_set_fd_set(n, outp, fds.res_out) ||
1256	    compat_set_fd_set(n, exp, fds.res_ex))
1257		ret = -EFAULT;
1258out:
1259	if (bits != stack_fds)
1260		kfree(bits);
1261out_nofds:
1262	return ret;
1263}
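/*
 * Editor's note: sizing the six bitmaps above, worked for n == 1024 on
 * 64-bit with SELECT_STACK_ALLOC == 256:
 *
 *	size = FDS_BYTES(1024) = (1024 / 64) * 8 = 128 bytes per bitmap
 *	6 * 128 = 768 > 256, so the bitmaps move to kmalloc_array()
 *
 * whereas n <= 320 gives size <= 40 <= 256 / 6, and stack_fds is used.
 */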
1264
1265static int do_compat_select(int n, compat_ulong_t __user *inp,
1266	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1267	struct old_timeval32 __user *tvp)
1268{
1269	struct timespec64 end_time, *to = NULL;
1270	struct old_timeval32 tv;
1271	int ret;
1272
1273	if (tvp) {
1274		if (copy_from_user(&tv, tvp, sizeof(tv)))
1275			return -EFAULT;
1276
1277		to = &end_time;
1278		if (poll_select_set_timeout(to,
1279				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
1280				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
1281			return -EINVAL;
1282	}
1283
1284	ret = compat_core_sys_select(n, inp, outp, exp, to);
1285	return poll_select_finish(&end_time, tvp, PT_OLD_TIMEVAL, ret);
1286}
1287
1288COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
1289	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
1290	struct old_timeval32 __user *, tvp)
1291{
1292	return do_compat_select(n, inp, outp, exp, tvp);
1293}
1294
1295struct compat_sel_arg_struct {
1296	compat_ulong_t n;
1297	compat_uptr_t inp;
1298	compat_uptr_t outp;
1299	compat_uptr_t exp;
1300	compat_uptr_t tvp;
1301};
1302
1303COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
1304{
1305	struct compat_sel_arg_struct a;
1306
1307	if (copy_from_user(&a, arg, sizeof(a)))
1308		return -EFAULT;
1309	return do_compat_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
1310				compat_ptr(a.exp), compat_ptr(a.tvp));
1311}
1312
1313static long do_compat_pselect(int n, compat_ulong_t __user *inp,
1314	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1315	void __user *tsp, compat_sigset_t __user *sigmask,
1316	compat_size_t sigsetsize, enum poll_time_type type)
1317{
1318	struct timespec64 ts, end_time, *to = NULL;
1319	int ret;
1320
1321	if (tsp) {
1322		switch (type) {
1323		case PT_OLD_TIMESPEC:
1324			if (get_old_timespec32(&ts, tsp))
1325				return -EFAULT;
1326			break;
1327		case PT_TIMESPEC:
1328			if (get_timespec64(&ts, tsp))
1329				return -EFAULT;
1330			break;
1331		default:
1332			BUG();
1333		}
1334
1335		to = &end_time;
1336		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1337			return -EINVAL;
1338	}
1339
1340	ret = set_compat_user_sigmask(sigmask, sigsetsize);
1341	if (ret)
1342		return ret;
1343
1344	ret = compat_core_sys_select(n, inp, outp, exp, to);
1345	return poll_select_finish(&end_time, tsp, type, ret);
1346}
1347
1348struct compat_sigset_argpack {
1349	compat_uptr_t p;
1350	compat_size_t size;
1351};
1352static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to,
1353					    struct compat_sigset_argpack __user *from)
1354{
1355	if (from) {
1356		if (!user_read_access_begin(from, sizeof(*from)))
1357			return -EFAULT;
1358		unsafe_get_user(to->p, &from->p, Efault);
1359		unsafe_get_user(to->size, &from->size, Efault);
1360		user_read_access_end();
1361	}
1362	return 0;
1363Efault:
1364	user_access_end();
1365	return -EFAULT;
1366}
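/*
 * Editor's note: same two-word pack as the native version, except the
 * pointer travels as a 32-bit compat_uptr_t and has to be widened with
 * compat_ptr() before use, as the callers below do:
 *
 *	sigset_t __user *up = compat_ptr(x.p);	// 32-bit uptr -> full user pointer
 */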
1367
1368COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp,
1369	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
1370	struct __kernel_timespec __user *, tsp, void __user *, sig)
1371{
1372	struct compat_sigset_argpack x = {0, 0};
1373
1374	if (get_compat_sigset_argpack(&x, sig))
1375		return -EFAULT;
1376
1377	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
1378				 x.size, PT_TIMESPEC);
1379}
1380
1381#if defined(CONFIG_COMPAT_32BIT_TIME)
1382
1383COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp,
1384	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
1385	struct old_timespec32 __user *, tsp, void __user *, sig)
1386{
1387	struct compat_sigset_argpack x = {0, 0};
1388
1389	if (get_compat_sigset_argpack(&x, sig))
1390		return -EFAULT;
1391
1392	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
1393				 x.size, PT_OLD_TIMESPEC);
1394}
1395
1396#endif
1397
1398#if defined(CONFIG_COMPAT_32BIT_TIME)
1399COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds,
1400	unsigned int,  nfds, struct old_timespec32 __user *, tsp,
1401	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
1402{
1403	struct timespec64 ts, end_time, *to = NULL;
1404	int ret;
1405
1406	if (tsp) {
1407		if (get_old_timespec32(&ts, tsp))
1408			return -EFAULT;
1409
1410		to = &end_time;
1411		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1412			return -EINVAL;
1413	}
1414
1415	ret = set_compat_user_sigmask(sigmask, sigsetsize);
1416	if (ret)
1417		return ret;
1418
1419	ret = do_sys_poll(ufds, nfds, to);
1420	return poll_select_finish(&end_time, tsp, PT_OLD_TIMESPEC, ret);
1421}
1422#endif
1423
1424/* New compat syscall for 64-bit time_t */
1425COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds,
1426	unsigned int,  nfds, struct __kernel_timespec __user *, tsp,
1427	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
1428{
1429	struct timespec64 ts, end_time, *to = NULL;
1430	int ret;
1431
1432	if (tsp) {
1433		if (get_timespec64(&ts, tsp))
1434			return -EFAULT;
1435
1436		to = &end_time;
1437		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
1438			return -EINVAL;
1439	}
1440
1441	ret = set_compat_user_sigmask(sigmask, sigsetsize);
1442	if (ret)
1443		return ret;
1444
1445	ret = do_sys_poll(ufds, nfds, to);
1446	return poll_select_finish(&end_time, tsp, PT_TIMESPEC, ret);
1447}
1448
1449#endif