v4.10.11
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}
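
/*
 * Editor's worked example (illustrative, not part of the original source):
 * for a 2.5 s relative timeout a normal task gets 2.5 s / 1000 = 2.5 ms of
 * slack, while a "nice" task (divfactor 200) gets 12.5 ms.  Timeouts beyond
 * ~100 s (~20 s when nice) saturate at the 100 ms MAX_SLACK cap.
 */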


struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}
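
/*
 * Editor's sketch of the call pattern (illustrative values, not part of
 * the original source): turning a relative 1.5 s timeout into the
 * absolute end time used throughout this file:
 *
 *	struct timespec64 end_time;
 *	if (poll_select_set_timeout(&end_time, 1, 500 * NSEC_PER_MSEC))
 *		return -EINVAL;	   (sec/nsec pair was not normalized)
 */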

static int poll_select_copy_remaining(struct timespec64 *end_time,
				      void __user *p,
				      int timeval, int ret)
{
	struct timespec64 rts64;
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts64);
	rts64 = timespec64_sub(*end_time, rts64);
	if (rts64.tv_sec < 0)
		rts64.tv_sec = rts64.tv_nsec = 0;

	rts = timespec64_to_timespec(rts64);

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts64.tv_sec;
		rtv.tv_usec = rts64.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle the last, incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}
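
/*
 * Editor's worked example (illustrative): on a 64-bit machine with n = 68,
 * the partial-word mask is ~(~0UL << (68 & 63)) = 0xf, covering descriptors
 * 64..67 in the last long-word; n then becomes 68 / 64 = 1, the number of
 * full words left to scan.
 */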

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				unsigned int ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}
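
/*
 * Editor's note (illustrative): a descriptor present only in the read set
 * thus polls with _key = POLLEX_SET | POLLIN_SET (plus ll_flag), so
 * pollwake() can discard wakeups for events the caller never asked for.
 */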

int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op->poll) {
						wait_key_set(wait, in, out,
							     bit, busy_flag);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval) {
						can_busy_loop = false;
						busy_flag = 0;

					/*
					 * only remember a returned
					 * POLL_BUSY_LOOP if we asked for it
					 */
					} else if (busy_flag & mask)
						can_busy_loop = true;

				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want it to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	size_t size, alloc_size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we use fd_sets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		if (size > (SIZE_MAX / 6))
			goto out_nofds;

		alloc_size = 6 * size;
		bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
		if (!bits && alloc_size > PAGE_SIZE)
			bits = vmalloc(alloc_size);

		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kvfree(bits);
out_nofds:
	return ret;
}
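
/*
 * Editor's worked example (illustrative, assuming the usual 256-byte
 * SELECT_STACK_ALLOC): for n = 1024, FDS_BYTES(1024) = 128 bytes per
 * bitmap, so the six bitmaps need 768 bytes and the kmalloc/vmalloc
 * fallback above is taken instead of the on-stack buffer.
 */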

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 ts64, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;
		ts64 = timespec_to_timespec64(ts);

		to = &end_time;
		if (poll_select_set_timeout(to, ts64.tv_sec, ts64.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
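
/*
 * Editor's sketch of the user-space view (illustrative, not part of the
 * original source): the sixth argument points at a structure equivalent to
 *
 *	struct {
 *		const sigset_t *ss;	pointer to the signal mask
 *		size_t ss_len;		sizeof(sigset_t)
 *	};
 *
 * which the C library builds internally when an application calls
 * pselect().
 */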

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
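
/*
 * Editor's worked example (illustrative): with 4 KiB pages, a 16-byte
 * struct poll_list header and 8-byte struct pollfd entries on a 64-bit
 * machine, POLLFD_PER_PAGE comes out to (4096 - 16) / 8 = 510 entries per
 * chained allocation.
 */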

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_busy_poll,
				     unsigned int busy_flag)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				pwait->_key |= busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}
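
/*
 * Editor's note (illustrative): because the result is masked with
 * (pollfd->events | POLLERR | POLLHUP), POLLERR and POLLHUP are always
 * reported in revents even when the caller did not ask for them, matching
 * POSIX poll() semantics.
 */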

static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))
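
/*
 * Editor's worked example (illustrative, assuming the usual 256-byte
 * POLL_STACK_ALLOC): with a 16-byte struct poll_list header and 8-byte
 * pollfds, N_STACK_PPS = (256 - 16) / 8 = 30 entries fit on the stack
 * before the chained kmalloc() blocks below take over.
 */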

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec64 *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec64 *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
v3.5.6
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>

#include <asm/uaccess.h>

/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

long select_estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}


struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work.  poll_wait() is an inline function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	get_file(filp);
	entry->filp = filp;
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}

static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle the last, incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit)
{
	wait->_key = POLLEX_SET;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}

int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			const struct file_operations *f_op = NULL;
			struct file *file = NULL;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				int fput_needed;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				file = fget_light(i, &fput_needed);
				if (file) {
					f_op = file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll) {
						wait_key_set(wait, in, out, bit);
						mask = (*f_op->poll)(file, wait);
					}
					fput_light(file, fput_needed);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND, which restarts only when you want it to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we use fd_sets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		int fput_needed;
		struct file * file;

		file = fget_light(fd, &fput_needed);
		mask = POLLNVAL;
		if (file != NULL) {
			mask = DEFAULT_POLLMASK;
			if (file->f_op && file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				mask = file->f_op->poll(file, pwait);
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fput_light(file, fput_needed);
		}
	}
	pollfd->revents = mask;

	return mask;
}
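
/*
 * Editor's note (illustrative): fget_light() avoids taking a reference on
 * the file when the file table is not shared; fput_needed records whether
 * fput_light() must drop one.  Later kernels wrap this pattern in
 * struct fd / fdget(), as in the v4.10.11 listing above.
 */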

static int do_poll(unsigned int nfds,  struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt)) {
					count++;
					pt->_qproc = NULL;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}