/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31	Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;
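
/*
 * With CONFIG_BASE_SMALL=0 (TVR_BITS = 8, TVN_BITS = 6), the wheel
 * levels cover the following expiry ranges, measured as
 * idx = expires - base->timer_jiffies:
 *
 *	tv1: idx < 2^8		(256 jiffies)
 *	tv2: idx < 2^14		(16384 jiffies)
 *	tv3: idx < 2^20		(~1M jiffies)
 *	tv4: idx < 2^26		(~67M jiffies)
 *	tv5: anything beyond, capped at 0xffffffff
 *
 * internal_add_timer() below indexes into these levels accordingly.
 */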

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = TBASE_MAKE_DEFERRED(timer->base);
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
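
/*
 * Usage sketch (my_timer and my_timer_fn are hypothetical names): a
 * periodic housekeeping timer whose exact firing time does not matter
 * can be rounded so it wakes the CPU together with other
 * second-aligned timers:
 *
 *	setup_timer(&my_timer, my_timer_fn, 0);
 *	mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * or, when working with a relative delay:
 *
 *	mod_timer(&my_timer, jiffies + round_jiffies_relative(5 * HZ));
 */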

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down.  This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
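
/*
 * Usage sketch (my_timer is a hypothetical, already initialized
 * timer): allow the expiry to be batched anywhere within an extra
 * 100ms window:
 *
 *	set_timer_slack(&my_timer, msecs_to_jiffies(100));
 *	mod_timer(&my_timer, jiffies + msecs_to_jiffies(500));
 */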

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
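
/*
 * Worked example: with TVR_BITS = 8, TVN_BITS = 6,
 * base->timer_jiffies = 0 and timer->expires = 300, idx is 300: not
 * less than TVR_SIZE (256), but less than 1 << (8 + 6), so the timer
 * lands in tv2 at slot (300 >> 8) & TVN_MASK == 1. It cascades into
 * tv1 once timer_jiffies catches up with its 256-jiffy window.
 */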

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			WARN_ON_ONCE(1);
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name		= "timer_list",
	.debug_hint	= timer_debug_hint,
	.fixup_init	= timer_fixup_init,
	.fixup_activate	= timer_fixup_activate,
	.fixup_free	= timer_fixup_free,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     const char *name,
			     struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
					 const char *name,
					 struct lock_class_key *key,
					 void (*function)(unsigned long),
					 unsigned long data)
{
	timer->function = function;
	timer->data = data;
	init_timer_on_stack_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key);

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done on a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    const char *name,
		    struct lock_class_key *key)
{
	debug_init(timer);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);

void init_timer_deferrable_key(struct timer_list *timer,
			       const char *name,
			       struct lock_class_key *key)
{
	init_timer_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable_key);

static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
						bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		if (timer->expires == base->next_timer &&
		    !tbase_get_deferrable(timer->base))
			base->next_timer = base->timer_jiffies;
		ret = 1;
	} else {
		if (pending_only)
			goto out_unlock;
	}

	debug_activate(timer, expires);

	cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
		cpu = get_nohz_timer_target();
#endif
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	if (time_before(timer->expires, base->next_timer) &&
	    !tbase_get_deferrable(timer->base))
		base->next_timer = timer->expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	mask = (1UL << bit) - 1;	/* 1UL: bit can exceed 31 on 64-bit */

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}
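
/*
 * Worked example: expires = 1000 (0x3e8) with timer->slack = 20 gives
 * expires_limit = 1020 (0x3fc). mask = 0x3e8 ^ 0x3fc = 0x14, whose
 * highest set bit is bit 4, so the final mask is 0xf. Rounding 1020
 * down yields 1008: the timer fires on a 16-jiffy-aligned boundary,
 * 8 jiffies after the requested expiry, well within the 20-jiffy slack.
 */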

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
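
/*
 * Usage sketch of the typical timer life cycle (my_timer, my_func and
 * my_data are hypothetical):
 *
 *	setup_timer(&my_timer, my_func, my_data);
 *	mod_timer(&my_timer, jiffies + HZ);	(fires in about a second)
 *	...
 *	del_timer_sync(&my_timer);		(on teardown)
 */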

/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and not allow the timer to be migrated to a different CPU.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	if (time_before(timer->expires, base->next_timer) &&
	    !tbase_get_deferrable(timer->base))
		base->next_timer = timer->expires;
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			if (timer->expires == base->next_timer &&
			    !tbase_get_deferrable(timer->base))
				base->next_timer = base->timer_jiffies;
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	timer_stats_timer_clear_start_info(timer);
	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		if (timer->expires == base->next_timer &&
		    !tbase_get_deferrable(timer->base))
			base->next_timer = base->timer_jiffies;
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * Note: You must not hold locks that are held in interrupt context
 *   while calling this function. Even if the lock has nothing to do
 *   with the timer in question.  Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                   <SOFTIRQ>
 *                                   call_timer_fn();
 *                                     base->running_timer = mytimer;
 *  spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *  del_timer_sync(mytimer);
 *   while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq());
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif

static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		internal_add_timer(base, timer);
	}

	return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = timer->lockdep_map;
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (preempt_count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, preempt_count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count() = preempt_count;
	}
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
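
/*
 * INDEX(N) yields the slot that base->timer_jiffies currently
 * addresses in wheel level N + 2, i.e. INDEX(0) indexes tv2, INDEX(1)
 * indexes tv3, and so on. __run_timers() below cascades a level only
 * when every lower level has wrapped around to slot 0.
 */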

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_timer(timer, 1);

			spin_unlock_irq(&base->lock);
			call_timer_fn(timer, fn, data);
			spin_lock_irq(&base->lock);
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				if (tbase_get_deferrable(nte->base))
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq.
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);
	unsigned long expires;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return now + NEXT_TIMER_MAX_DELTA;
	spin_lock(&base->lock);
	if (time_before_eq(base->next_timer, base->timer_jiffies))
		base->next_timer = __next_timer_interrupt(base);
	expires = base->next_timer;
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(cpu, user_tick);
	printk_tick();
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_run();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(current->real_parent);
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return current_uid();
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return current_euid();
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return current_gid();
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return current_egid();
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout) {
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful for caller convenience.
		 * Nothing more. We could take MAX_SCHEDULE_TIMEOUT from one
		 * of the negative values, but I'd like to return a valid
		 * offset (>= 0) to allow the caller to do everything it
		 * wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of paranoia. Note that the retval will be
		 * 0 since no piece of the kernel is supposed to check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
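
/*
 * Usage sketch: waiting up to one second for an event, in the style
 * described above (my_event_ready is a hypothetical predicate):
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *	if (!remaining && !my_event_ready())
 *		(timed out without the event arriving)
 */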

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_ts(&tp);
	monotonic_to_bootbased(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}
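
/*
 * Worked example: with mem_unit = 4096 (a 4KiB page size) the loop
 * above ends with bitcount = 12 after verifying that mem_total can be
 * doubled twelve times without overflowing. All memory fields are then
 * shifted left by 12 and mem_unit becomes 1, so userspace sees plain
 * byte counts whenever they fit.
 */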

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
						GFP_KERNEL | __GFP_ZERO,
						cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		detach_timer(timer, 0);
		timer_set_base(timer, new_base);
		if (time_before(timer->expires, new_base->next_timer) &&
		    !tbase_get_deferrable(timer->base))
			new_base->next_timer = timer->expires;
		internal_add_timer(new_base, timer);
	}
}

static void __cpuinit migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, so deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = init_timers_cpu(cpu);
		if (err < 0)
			return notifier_from_errno(err);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());

	init_timer_stats();

	BUG_ON(err != NOTIFY_OK);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);

static int __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	unsigned long delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (max - min) * NSEC_PER_USEC;
	return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop-in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
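
/*
 * Usage sketch: replacing a busy-wait udelay() in process context with
 * a flexible sleep, e.g. while polling hypothetical hardware
 * (status_reg and READY are made-up names):
 *
 *	while (!(readl(status_reg) & READY))
 *		usleep_range(100, 200);
 */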