// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/of_clk.h>
#include <linux/suspend.h>
#include <linux/sched/cputime.h>
#include <linux/processor.h>
#include <asm/trace.h>

#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/asm-prototypes.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>

static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name         = "timebase",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = timebase_read,
	.vdso_clock_mode	= VDSO_CLOCKMODE_ARCHTIMER,
};

#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

struct clock_event_device decrementer_clockevent = {
	.name			= "decrementer",
	.rating			= 200,
	.irq			= 0,
	.set_next_event		= decrementer_set_next_event,
	.set_state_oneshot_stopped = decrementer_shutdown,
	.set_state_shutdown	= decrementer_shutdown,
	.tick_resume		= decrementer_shutdown,
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

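/*
 * Scale a quantity measured in xsec (1/2^20 of a second, per
 * XSEC_PER_SEC above) by max. On 32-bit, mulhwu() returns the high
 * word of the product, so ((xsec << 12) * max) >> 32 equals
 * (xsec * max) / 2^20 without needing a 64-bit divide.
 */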
#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

bool tb_invalid;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factor for converting from cputime_t (timebase ticks) to
 * microseconds. This is stored as 0.64 fixed-point binary fraction.
 */
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);

#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
#endif

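/*
 * Compute that factor: div128_by_32() yields (10^6 * 2^64) /
 * tb_ticks_per_sec, whose low 64 bits are the 0.64 fixed-point
 * number of microseconds per timebase tick, so conversions are
 * usecs = (ticks * __cputime_usec_factor) >> 64.
 */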
static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
}

/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static inline unsigned long read_spurr(unsigned long tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}

#ifdef CONFIG_PPC_SPLPAR

#include <asm/dtl.h>

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void notrace accumulate_stolen_time(void)
{
	u64 sst, ust;
	struct cpu_accounting_data *acct = &local_paca->accounting;

	sst = scan_dispatch_log(acct->starttime_user);
	ust = scan_dispatch_log(acct->starttime);
	acct->stime -= sst;
	acct->utime -= ust;
	acct->steal_time += ust + sst;
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
		return scan_dispatch_log(stop_tb);

	return 0;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
					unsigned long now, unsigned long stime)
{
	unsigned long stime_scaled = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	unsigned long nowscaled, deltascaled;
	unsigned long utime, utime_scaled;

	nowscaled = read_spurr(now);
	deltascaled = nowscaled - acct->startspurr;
	acct->startspurr = nowscaled;
	utime = acct->utime - acct->utime_sspurr;
	acct->utime_sspurr = acct->utime;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (stime)
	 * and user time (utime) values obtained from the timebase
	 * over the same interval.  The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
	stime_scaled = stime;
	utime_scaled = utime;
	if (deltascaled != stime + utime) {
		if (utime) {
			stime_scaled = deltascaled * stime / (stime + utime);
			utime_scaled = deltascaled - stime_scaled;
		} else {
			stime_scaled = deltascaled;
		}
	}
	acct->utime_scaled += utime_scaled;
#endif

	return stime_scaled;
}

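/*
 * Return the timebase ticks elapsed since the last accounting update
 * and restart the interval; also report the scaled (SPURR) system
 * time and any hypervisor-stolen time over the same interval.
 */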
static unsigned long vtime_delta(struct cpu_accounting_data *acct,
				 unsigned long *stime_scaled,
				 unsigned long *steal_time)
{
	unsigned long now, stime;

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	stime = now - acct->starttime;
	acct->starttime = now;

	*stime_scaled = vtime_delta_scaled(acct, now, stime);

	*steal_time = calculate_stolen_time(now);

	return stime;
}

static void vtime_delta_kernel(struct cpu_accounting_data *acct,
			       unsigned long *stime, unsigned long *stime_scaled)
{
	unsigned long steal_time;

	*stime = vtime_delta(acct, stime_scaled, &steal_time);
	*stime -= min(*stime, steal_time);
	acct->steal_time += steal_time;
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);
	unsigned long stime, stime_scaled;

	vtime_delta_kernel(acct, &stime, &stime_scaled);

	if (tsk->flags & PF_VCPU) {
		acct->gtime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->utime_scaled += stime_scaled;
#endif
	} else {
		acct->stime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->stime_scaled += stime_scaled;
#endif
	}
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);

void vtime_account_idle(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(acct, &stime_scaled, &steal_time);
	acct->idle_time += stime + steal_time;
}

static void vtime_account_irq_field(struct cpu_accounting_data *acct,
				    unsigned long *field)
{
	unsigned long stime, stime_scaled;

	vtime_delta_kernel(acct, &stime, &stime_scaled);
	*field += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	acct->stime_scaled += stime_scaled;
#endif
}

void vtime_account_softirq(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);
	vtime_account_irq_field(acct, &acct->softirq_time);
}

void vtime_account_hardirq(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);
	vtime_account_irq_field(acct, &acct->hardirq_time);
}

static void vtime_flush_scaled(struct task_struct *tsk,
			       struct cpu_accounting_data *acct)
{
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	if (acct->utime_scaled)
		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
	if (acct->stime_scaled)
		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);

	acct->utime_scaled = 0;
	acct->utime_sspurr = 0;
	acct->stime_scaled = 0;
#endif
}

/*
 * Account the whole cputime accumulated in the paca.
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_kernel/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_flush(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	if (acct->utime)
		account_user_time(tsk, cputime_to_nsecs(acct->utime));

	if (acct->gtime)
		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));

	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
		account_steal_time(cputime_to_nsecs(acct->steal_time));
		acct->steal_time = 0;
	}

	if (acct->idle_time)
		account_idle_time(cputime_to_nsecs(acct->idle_time));

	if (acct->stime)
		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
					  CPUTIME_SYSTEM);

	if (acct->hardirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
					  CPUTIME_IRQ);
	if (acct->softirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
					  CPUTIME_SOFTIRQ);

	vtime_flush_scaled(tsk, acct);

	acct->utime = 0;
	acct->gtime = 0;
	acct->idle_time = 0;
	acct->stime = 0;
	acct->hardirq_time = 0;
	acct->softirq_time = 0;
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
	unsigned long start;

	spin_begin();
	if (tb_invalid) {
		/*
		 * TB is in error state and isn't ticking anymore.
		 * HMI handler was unable to recover from TB error.
		 * Return immediately, so that kernel won't get stuck here.
		 */
		spin_cpu_relax();
	} else {
		start = mftb();
		while (mftb() - start < loops)
			spin_cpu_relax();
	}
	spin_end();
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
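/*
 * On 64-bit, GPR 13 always holds the address of this CPU's paca, so
 * the flag can be read and written with a single byte load/store.
 */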
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
	/*
	 * 64-bit code that uses irq soft-mask can just cause an immediate
	 * interrupt here that gets soft masked, if this is called under
	 * local_irq_disable(). It might be possible to prevent that happening
	 * by noticing interrupts are disabled and setting decrementer pending
	 * to be replayed when irqs are enabled. The problem there is that
	 * tracing can call irq_work_raise, including in code that does low
	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
	 * which could get tangled up if we're messing with the same state
	 * here.
	 */
	preempt_disable();
	set_irq_work_pending_flag();
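	/*
	 * Setting the decrementer to 1 makes it fire on its next tick;
	 * timer_interrupt() will then see the pending flag and run the
	 * queued irq_work.
	 */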
	set_dec(1);
	preempt_enable();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
{
	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
	struct pt_regs *old_regs;
	u64 now;

	/*
	 * Some implementations of hotplug will get timer interrupts while
	 * offline, just ignore these.
	 */
	if (unlikely(!cpu_online(smp_processor_id()))) {
		set_dec(decrementer_max);
		return;
	}

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions. When the
	 * PPC_WATCHDOG (decrementer based) is configured, keep this at most
	 * 31 bits, which is about 4 seconds on most systems, which gives
	 * the watchdog a chance of catching timer interrupt hard lockups.
	 */
	if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
		set_dec(0x7fffffff);
	else
		set_dec(decrementer_max);

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value
	 */
	may_hard_irq_enable();

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		__do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);

	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
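		/*
		 * ~0 marks "no deadline pending"; the event handler will
		 * program the next one via decrementer_set_next_event().
		 */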
		if (evt->event_handler)
			evt->event_handler(evt);
		__this_cpu_inc(irq_stat.timer_irqs_event);
	} else {
		now = *next_tb - now;
		if (now <= decrementer_max)
			set_dec(now);
		/* We may have raced with new irq work */
		if (test_irq_work_pending())
			set_dec(1);
		__this_cpu_inc(irq_stat.timer_irqs_others);
	}

	trace_timer_interrupt_exit(regs);

	set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void timer_broadcast_interrupt(void)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	*next_tb = ~(u64)0;
	tick_receive_broadcast();
	__this_cpu_inc(irq_stat.broadcast_irqs_event);
}
#endif

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(decrementer_max);
	local_irq_disable();
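	/*
	 * Re-arm in case a timer interrupt snuck in and reprogrammed
	 * the decrementer before interrupts were disabled.
	 */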
	set_dec(decrementer_max);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif

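/*
 * Convert a timebase tick count to nanoseconds, using the 64.64
 * fixed-point tb_to_ns_scale/shift pair computed in time_init().
 */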
unsigned long long tb_to_ns(unsigned long long ticks)
{
	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
notrace unsigned long long sched_clock(void)
{
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}


#ifdef CONFIG_PPC_PSERIES

/*
 * Running clock - attempts to give a view of time passing for a virtualised
 * kernel.
 * Uses the VTB register if available otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
	/*
	 * Don't read the VTB as a host since KVM does not switch in host
	 * timebase into the VTB when it takes a guest off the CPU, reading the
	 * VTB would result in reading 'last switched out' guest VTB.
	 *
	 * Host kernels are often compiled with CONFIG_PPC_PSERIES checked, it
	 * would be unsafe to rely only on the #ifdef above.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

	/*
	 * This is the next best approximation without a VTB.
	 * On a host running bare metal there should never be any stolen
	 * time and on a host which doesn't do any virtualisation TB *should* equal
	 * VTB so it makes no difference anyway.
	 */
	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
#endif

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	unsigned int tcr;

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	tcr = mfspr(SPRN_TCR);
	/*
	 * The watchdog may have already been enabled by u-boot. So leave
	 * TCR[WP] (Watchdog Period) alone.
	 */
	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
	tcr |= TCR_DIE;		/* Enable decrementer */
	mtspr(SPRN_TCR, tcr);
#endif
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}

int update_persistent_clock64(struct timespec64 now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);

	return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec64 *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = rtc_tm_to_time64(&tm);
}

void read_persistent_clock64(struct timespec64 *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}

/* clocksource code */
static notrace u64 timebase_read(struct clocksource *cs)
{
	return (u64)get_tb();
}

static void __init clocksource_init(void)
{
	struct clocksource *clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}

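/*
 * Program this CPU's decrementer to fire 'evt' timebase ticks from now,
 * recording the absolute deadline in decrementers_next_tb so that
 * timer_interrupt() can tell real expiries from early wakeups.
 */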
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__this_cpu_write(decrementers_next_tb, get_tb() + evt);
	set_dec(evt);

	/* We may have raced with new irq work */
	if (test_irq_work_pending())
		set_dec(1);

	return 0;
}

static int decrementer_shutdown(struct clock_event_device *dev)
{
	decrementer_set_next_event(decrementer_max, dev);
	return 0;
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	/* Set values for KVM, see kvm_emulate_dec() */
	decrementer_clockevent.mult = dec->mult;
	decrementer_clockevent.shift = dec->shift;
}

static void enable_large_decrementer(void)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
		return;

	/*
	 * If we're running as the hypervisor we need to enable the LD manually
	 * otherwise firmware should have done it for us.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
}

static void __init set_decrementer_max(void)
{
	struct device_node *cpu;
	u32 bits = 32;

	/* Prior to ISAv3 the decrementer is always 32 bit */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	cpu = of_find_node_by_type(NULL, "cpu");

	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
		if (bits > 64 || bits < 32) {
			pr_warn("time_init: firmware supplied invalid ibm,dec-bits\n");
			bits = 32;
		}

		/* calculate the signed maximum given this many bits */
		decrementer_max = (1ul << (bits - 1)) - 1;
	}

	of_node_put(cpu);

	pr_info("time_init: %u bit decrementer (max: %llx)\n",
		bits, decrementer_max);
}

static void __init init_decrementer_clockevent(void)
{
	register_decrementer_clockevent(smp_processor_id());
}

void secondary_cpu_time_init(void)
{
	/* Enable and test the large decrementer for this cpu */
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make unrelated change to move snapshot_timebase
	 * call here ! */
	register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	/* Normal PowerPC with timebase register */
	ppc_md.calibrate_decr();
	printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
	       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
	printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
	       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
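	/*
	 * Example: a 512 MHz timebase gives 1.953125 ns per tick, so the
	 * division yields ~1.953 * 2^64; one right shift normalizes that
	 * to scale ~0.977 * 2^64 with shift = 1.
	 */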
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* initialise and enable the large decrementer (if we have one) */
	set_decrementer_max();
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
	tick_setup_hrtimer_broadcast();

	of_clk_init(NULL);
	enable_sched_clock_irqtime();
}

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
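/*
 * This is schoolbook long division in base 2^32: the four 32-bit
 * digits a:b:c:d are divided high to low, with each remainder
 * carried into the next digit via do_div().
 */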
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}

/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}

#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	ppc_md.get_rtc_time(tm);
	return 0;
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	if (!ppc_md.set_rtc_time)
		return -EOPNOTSUPP;

	if (ppc_md.set_rtc_time(tm) < 0)
		return -EOPNOTSUPP;

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));

	return PTR_ERR_OR_ZERO(pdev);
}

device_initcall(rtc_init);
#endif
v3.1
 
   1/*
   2 * Common time routines among all ppc machines.
   3 *
   4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
   5 * Paul Mackerras' version and mine for PReP and Pmac.
   6 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
   7 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
   8 *
   9 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
  10 * to make clock more stable (2.4.0-test5). The only thing
  11 * that this code assumes is that the timebases have been synchronized
  12 * by firmware on SMP and are never stopped (never do sleep
  13 * on SMP then, nap and doze are OK).
  14 * 
  15 * Speeded up do_gettimeofday by getting rid of references to
  16 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
  17 *
  18 * TODO (not necessarily in this file):
  19 * - improve precision and reproducibility of timebase frequency
  20 * measurement at boot time. (for iSeries, we calibrate the timebase
  21 * against the Titan chip's clock.)
  22 * - for astronomical applications: add a new function to get
  23 * non ambiguous timestamps even around leap seconds. This needs
  24 * a new timestamp format and a good name.
  25 *
  26 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
  27 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
  28 *
  29 *      This program is free software; you can redistribute it and/or
  30 *      modify it under the terms of the GNU General Public License
  31 *      as published by the Free Software Foundation; either version
  32 *      2 of the License, or (at your option) any later version.
  33 */
  34
  35#include <linux/errno.h>
  36#include <linux/module.h>
  37#include <linux/sched.h>
 
  38#include <linux/kernel.h>
  39#include <linux/param.h>
  40#include <linux/string.h>
  41#include <linux/mm.h>
  42#include <linux/interrupt.h>
  43#include <linux/timex.h>
  44#include <linux/kernel_stat.h>
  45#include <linux/time.h>
  46#include <linux/init.h>
  47#include <linux/profile.h>
  48#include <linux/cpu.h>
  49#include <linux/security.h>
  50#include <linux/percpu.h>
  51#include <linux/rtc.h>
  52#include <linux/jiffies.h>
  53#include <linux/posix-timers.h>
  54#include <linux/irq.h>
  55#include <linux/delay.h>
  56#include <linux/irq_work.h>
 
 
 
 
 
  57#include <asm/trace.h>
  58
 
  59#include <asm/io.h>
  60#include <asm/processor.h>
  61#include <asm/nvram.h>
  62#include <asm/cache.h>
  63#include <asm/machdep.h>
  64#include <asm/uaccess.h>
  65#include <asm/time.h>
  66#include <asm/prom.h>
  67#include <asm/irq.h>
  68#include <asm/div64.h>
  69#include <asm/smp.h>
  70#include <asm/vdso_datapage.h>
  71#include <asm/firmware.h>
  72#include <asm/cputime.h>
  73#ifdef CONFIG_PPC_ISERIES
  74#include <asm/iseries/it_lp_queue.h>
  75#include <asm/iseries/hv_call_xm.h>
  76#endif
  77
  78/* powerpc clocksource/clockevent code */
  79
  80#include <linux/clockchips.h>
  81#include <linux/clocksource.h>
  82
  83static cycle_t rtc_read(struct clocksource *);
  84static struct clocksource clocksource_rtc = {
  85	.name         = "rtc",
  86	.rating       = 400,
  87	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
  88	.mask         = CLOCKSOURCE_MASK(64),
  89	.shift        = 22,
  90	.mult         = 0,	/* To be filled in */
  91	.read         = rtc_read,
  92};
  93
  94static cycle_t timebase_read(struct clocksource *);
  95static struct clocksource clocksource_timebase = {
  96	.name         = "timebase",
  97	.rating       = 400,
  98	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
  99	.mask         = CLOCKSOURCE_MASK(64),
 100	.shift        = 22,
 101	.mult         = 0,	/* To be filled in */
 102	.read         = timebase_read,
 
 103};
 104
 105#define DECREMENTER_MAX	0x7fffffff
 
 106
 107static int decrementer_set_next_event(unsigned long evt,
 108				      struct clock_event_device *dev);
 109static void decrementer_set_mode(enum clock_event_mode mode,
 110				 struct clock_event_device *dev);
 111
 112static struct clock_event_device decrementer_clockevent = {
 113       .name           = "decrementer",
 114       .rating         = 200,
 115       .shift          = 0,	/* To be filled in */
 116       .mult           = 0,	/* To be filled in */
 117       .irq            = 0,
 118       .set_next_event = decrementer_set_next_event,
 119       .set_mode       = decrementer_set_mode,
 120       .features       = CLOCK_EVT_FEAT_ONESHOT,
 121};
 122
 123struct decrementer_clock {
 124	struct clock_event_device event;
 125	u64 next_tb;
 
 
 
 
 
 
 
 126};
 
 127
 128static DEFINE_PER_CPU(struct decrementer_clock, decrementers);
 129
 130#ifdef CONFIG_PPC_ISERIES
 131static unsigned long __initdata iSeries_recal_titan;
 132static signed long __initdata iSeries_recal_tb;
 133
 134/* Forward declaration is only needed for iSereis compiles */
 135static void __init clocksource_init(void);
 136#endif
 137
 138#define XSEC_PER_SEC (1024*1024)
 139
 140#ifdef CONFIG_PPC64
 141#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
 142#else
 143/* compute ((xsec << 12) * max) >> 32 */
 144#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
 145#endif
 146
 147unsigned long tb_ticks_per_jiffy;
 148unsigned long tb_ticks_per_usec = 100; /* sane default */
 149EXPORT_SYMBOL(tb_ticks_per_usec);
 150unsigned long tb_ticks_per_sec;
 151EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
 152
 153DEFINE_SPINLOCK(rtc_lock);
 154EXPORT_SYMBOL_GPL(rtc_lock);
 155
 156static u64 tb_to_ns_scale __read_mostly;
 157static unsigned tb_to_ns_shift __read_mostly;
 158static u64 boot_tb __read_mostly;
 159
 160extern struct timezone sys_tz;
 161static long timezone_offset;
 162
 163unsigned long ppc_proc_freq;
 164EXPORT_SYMBOL_GPL(ppc_proc_freq);
 165unsigned long ppc_tb_freq;
 166EXPORT_SYMBOL_GPL(ppc_tb_freq);
 167
 168#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 
 
 169/*
 170 * Factors for converting from cputime_t (timebase ticks) to
 171 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 172 * These are all stored as 0.64 fixed-point binary fractions.
 173 */
 174u64 __cputime_jiffies_factor;
 175EXPORT_SYMBOL(__cputime_jiffies_factor);
 176u64 __cputime_msec_factor;
 177EXPORT_SYMBOL(__cputime_msec_factor);
 178u64 __cputime_sec_factor;
 179EXPORT_SYMBOL(__cputime_sec_factor);
 180u64 __cputime_clockt_factor;
 181EXPORT_SYMBOL(__cputime_clockt_factor);
 182DEFINE_PER_CPU(unsigned long, cputime_last_delta);
 183DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
 184
 185cputime_t cputime_one_jiffy;
 186
 
 187void (*dtl_consumer)(struct dtl_entry *, u64);
 
 188
 189static void calc_cputime_factors(void)
 190{
 191	struct div_result res;
 192
 193	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
 194	__cputime_jiffies_factor = res.result_low;
 195	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
 196	__cputime_msec_factor = res.result_low;
 197	div128_by_32(1, 0, tb_ticks_per_sec, &res);
 198	__cputime_sec_factor = res.result_low;
 199	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
 200	__cputime_clockt_factor = res.result_low;
 201}
 202
 203/*
 204 * Read the SPURR on systems that have it, otherwise the PURR,
 205 * or if that doesn't exist return the timebase value passed in.
 206 */
 207static u64 read_spurr(u64 tb)
 208{
 209	if (cpu_has_feature(CPU_FTR_SPURR))
 210		return mfspr(SPRN_SPURR);
 211	if (cpu_has_feature(CPU_FTR_PURR))
 212		return mfspr(SPRN_PURR);
 213	return tb;
 214}
 215
 216#ifdef CONFIG_PPC_SPLPAR
 217
 
 
 218/*
 219 * Scan the dispatch trace log and count up the stolen time.
 220 * Should be called with interrupts disabled.
 221 */
 222static u64 scan_dispatch_log(u64 stop_tb)
 223{
 224	u64 i = local_paca->dtl_ridx;
 225	struct dtl_entry *dtl = local_paca->dtl_curr;
 226	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
 227	struct lppaca *vpa = local_paca->lppaca_ptr;
 228	u64 tb_delta;
 229	u64 stolen = 0;
 230	u64 dtb;
 231
 232	if (!dtl)
 233		return 0;
 234
 235	if (i == vpa->dtl_idx)
 236		return 0;
 237	while (i < vpa->dtl_idx) {
 238		if (dtl_consumer)
 239			dtl_consumer(dtl, i);
 240		dtb = dtl->timebase;
 241		tb_delta = dtl->enqueue_to_dispatch_time +
 242			dtl->ready_to_enqueue_time;
 243		barrier();
 244		if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
 245			/* buffer has overflowed */
 246			i = vpa->dtl_idx - N_DISPATCH_LOG;
 247			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
 248			continue;
 249		}
 250		if (dtb > stop_tb)
 251			break;
 
 
 252		stolen += tb_delta;
 253		++i;
 254		++dtl;
 255		if (dtl == dtl_end)
 256			dtl = local_paca->dispatch_log;
 257	}
 258	local_paca->dtl_ridx = i;
 259	local_paca->dtl_curr = dtl;
 260	return stolen;
 261}
 262
 263/*
 264 * Accumulate stolen time by scanning the dispatch trace log.
 265 * Called on entry from user mode.
 266 */
 267void accumulate_stolen_time(void)
 268{
 269	u64 sst, ust;
 
 270
 271	u8 save_soft_enabled = local_paca->soft_enabled;
 272	u8 save_hard_enabled = local_paca->hard_enabled;
 273
 274	/* We are called early in the exception entry, before
 275	 * soft/hard_enabled are sync'ed to the expected state
 276	 * for the exception. We are hard disabled but the PACA
 277	 * needs to reflect that so various debug stuff doesn't
 278	 * complain
 279	 */
 280	local_paca->soft_enabled = 0;
 281	local_paca->hard_enabled = 0;
 282
 283	sst = scan_dispatch_log(local_paca->starttime_user);
 284	ust = scan_dispatch_log(local_paca->starttime);
 285	local_paca->system_time -= sst;
 286	local_paca->user_time -= ust;
 287	local_paca->stolen_time += ust + sst;
 288
 289	local_paca->soft_enabled = save_soft_enabled;
 290	local_paca->hard_enabled = save_hard_enabled;
 291}
 292
 293static inline u64 calculate_stolen_time(u64 stop_tb)
 294{
 295	u64 stolen = 0;
 
 296
 297	if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
 298		stolen = scan_dispatch_log(stop_tb);
 299		get_paca()->system_time -= stolen;
 300	}
 301
 302	stolen += get_paca()->stolen_time;
 303	get_paca()->stolen_time = 0;
 304	return stolen;
 305}
 306
 307#else /* CONFIG_PPC_SPLPAR */
 308static inline u64 calculate_stolen_time(u64 stop_tb)
 309{
 310	return 0;
 311}
 312
 313#endif /* CONFIG_PPC_SPLPAR */
 314
 315/*
 316 * Account time for a transition between system, hard irq
 317 * or soft irq state.
 318 */
 319void account_system_vtime(struct task_struct *tsk)
 
 320{
 321	u64 now, nowscaled, delta, deltascaled;
 322	unsigned long flags;
 323	u64 stolen, udelta, sys_scaled, user_scaled;
 
 324
 325	local_irq_save(flags);
 326	now = mftb();
 327	nowscaled = read_spurr(now);
 328	get_paca()->system_time += now - get_paca()->starttime;
 329	get_paca()->starttime = now;
 330	deltascaled = nowscaled - get_paca()->startspurr;
 331	get_paca()->startspurr = nowscaled;
 332
 333	stolen = calculate_stolen_time(now);
 334
 335	delta = get_paca()->system_time;
 336	get_paca()->system_time = 0;
 337	udelta = get_paca()->user_time - get_paca()->utime_sspurr;
 338	get_paca()->utime_sspurr = get_paca()->user_time;
 339
 340	/*
 341	 * Because we don't read the SPURR on every kernel entry/exit,
 342	 * deltascaled includes both user and system SPURR ticks.
 343	 * Apportion these ticks to system SPURR ticks and user
 344	 * SPURR ticks in the same ratio as the system time (delta)
 345	 * and user time (udelta) values obtained from the timebase
 346	 * over the same interval.  The system ticks get accounted here;
 347	 * the user ticks get saved up in paca->user_time_scaled to be
 348	 * used by account_process_tick.
 349	 */
 350	sys_scaled = delta;
 351	user_scaled = udelta;
 352	if (deltascaled != delta + udelta) {
 353		if (udelta) {
 354			sys_scaled = deltascaled * delta / (delta + udelta);
 355			user_scaled = deltascaled - sys_scaled;
 356		} else {
 357			sys_scaled = deltascaled;
 358		}
 359	}
 360	get_paca()->user_time_scaled += user_scaled;
 
 361
 362	if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
 363		account_system_time(tsk, 0, delta, sys_scaled);
 364		if (stolen)
 365			account_steal_time(stolen);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 366	} else {
 367		account_idle_time(delta + stolen);
 
 
 
 368	}
 369	local_irq_restore(flags);
 370}
 371EXPORT_SYMBOL_GPL(account_system_vtime);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 372
 373/*
 374 * Transfer the user and system times accumulated in the paca
 375 * by the exception entry and exit code to the generic process
 376 * user and system time records.
 377 * Must be called with interrupts disabled.
 378 * Assumes that account_system_vtime() has been called recently
 379 * (i.e. since the last entry from usermode) so that
 380 * get_paca()->user_time_scaled is up to date.
 381 */
 382void account_process_tick(struct task_struct *tsk, int user_tick)
 383{
 384	cputime_t utime, utimescaled;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 385
 386	utime = get_paca()->user_time;
 387	utimescaled = get_paca()->user_time_scaled;
 388	get_paca()->user_time = 0;
 389	get_paca()->user_time_scaled = 0;
 390	get_paca()->utime_sspurr = 0;
 391	account_user_time(tsk, utime, utimescaled);
 
 
 
 
 
 
 
 
 
 
 
 
 
 392}
 393
 394#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
 395#define calc_cputime_factors()
 396#endif
 397
 398void __delay(unsigned long loops)
 399{
 400	unsigned long start;
 401	int diff;
 402
 403	if (__USE_RTC()) {
 404		start = get_rtcl();
 405		do {
 406			/* the RTCL register wraps at 1000000000 */
 407			diff = get_rtcl() - start;
 408			if (diff < 0)
 409				diff += 1000000000;
 410		} while (diff < loops);
 411	} else {
 412		start = get_tbl();
 413		while (get_tbl() - start < loops)
 414			HMT_low();
 415		HMT_medium();
 416	}
 
 417}
 418EXPORT_SYMBOL(__delay);
 419
 420void udelay(unsigned long usecs)
 421{
 422	__delay(tb_ticks_per_usec * usecs);
 423}
 424EXPORT_SYMBOL(udelay);
 425
 426#ifdef CONFIG_SMP
 427unsigned long profile_pc(struct pt_regs *regs)
 428{
 429	unsigned long pc = instruction_pointer(regs);
 430
 431	if (in_lock_functions(pc))
 432		return regs->link;
 433
 434	return pc;
 435}
 436EXPORT_SYMBOL(profile_pc);
 437#endif
 438
 439#ifdef CONFIG_PPC_ISERIES
 440
 441/* 
 442 * This function recalibrates the timebase based on the 49-bit time-of-day
 443 * value in the Titan chip.  The Titan is much more accurate than the value
 444 * returned by the service processor for the timebase frequency.  
 445 */
 446
 447static int __init iSeries_tb_recal(void)
 448{
 449	unsigned long titan, tb;
 450
 451	/* Make sure we only run on iSeries */
 452	if (!firmware_has_feature(FW_FEATURE_ISERIES))
 453		return -ENODEV;
 454
 455	tb = get_tb();
 456	titan = HvCallXm_loadTod();
 457	if ( iSeries_recal_titan ) {
 458		unsigned long tb_ticks = tb - iSeries_recal_tb;
 459		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
 460		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
 461		unsigned long new_tb_ticks_per_jiffy =
 462			DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ);
 463		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
 464		char sign = '+';		
 465		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
 466		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
 467
 468		if ( tick_diff < 0 ) {
 469			tick_diff = -tick_diff;
 470			sign = '-';
 471		}
 472		if ( tick_diff ) {
 473			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
 474				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
 475						new_tb_ticks_per_jiffy, sign, tick_diff );
 476				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
 477				tb_ticks_per_sec   = new_tb_ticks_per_sec;
 478				calc_cputime_factors();
 479				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
 480				setup_cputime_one_jiffy();
 481			}
 482			else {
 483				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
 484					"                   new tb_ticks_per_jiffy = %lu\n"
 485					"                   old tb_ticks_per_jiffy = %lu\n",
 486					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
 487			}
 488		}
 489	}
 490	iSeries_recal_titan = titan;
 491	iSeries_recal_tb = tb;
 492
 493	/* Called here as now we know accurate values for the timebase */
 494	clocksource_init();
 495	return 0;
 496}
 497late_initcall(iSeries_tb_recal);
 498
 499/* Called from platform early init */
 500void __init iSeries_time_init_early(void)
 501{
 502	iSeries_recal_tb = get_tb();
 503	iSeries_recal_titan = HvCallXm_loadTod();
 504}
 505#endif /* CONFIG_PPC_ISERIES */
 506
 507#ifdef CONFIG_IRQ_WORK
 508
 509/*
 510 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 511 */
 512#ifdef CONFIG_PPC64
 513static inline unsigned long test_irq_work_pending(void)
 514{
 515	unsigned long x;
 516
 517	asm volatile("lbz %0,%1(13)"
 518		: "=r" (x)
 519		: "i" (offsetof(struct paca_struct, irq_work_pending)));
 520	return x;
 521}
 522
 523static inline void set_irq_work_pending_flag(void)
 524{
 525	asm volatile("stb %0,%1(13)" : :
 526		"r" (1),
 527		"i" (offsetof(struct paca_struct, irq_work_pending)));
 528}
 529
 530static inline void clear_irq_work_pending(void)
 531{
 532	asm volatile("stb %0,%1(13)" : :
 533		"r" (0),
 534		"i" (offsetof(struct paca_struct, irq_work_pending)));
 535}
 536
 537#else /* 32-bit */
 538
 539DEFINE_PER_CPU(u8, irq_work_pending);
 540
 541#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
 542#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
 543#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0
 544
 545#endif /* 32 vs 64 bit */
 546
 547void arch_irq_work_raise(void)
 548{
 
 
 
 
 
 
 
 
 
 
 
 549	preempt_disable();
 550	set_irq_work_pending_flag();
 551	set_dec(1);
 552	preempt_enable();
 553}
 554
 555#else  /* CONFIG_IRQ_WORK */
 556
 557#define test_irq_work_pending()	0
 558#define clear_irq_work_pending()
 559
 560#endif /* CONFIG_IRQ_WORK */
 561
 562/*
 563 * For iSeries shared processors, we have to let the hypervisor
 564 * set the hardware decrementer.  We set a virtual decrementer
 565 * in the lppaca and call the hypervisor if the virtual
 566 * decrementer is less than the current value in the hardware
 567 * decrementer. (almost always the new decrementer value will
 568 * be greater than the current hardware decementer so the hypervisor
 569 * call will not be needed)
 570 */
 571
 572/*
 573 * timer_interrupt - gets called when the decrementer overflows,
 574 * with interrupts disabled.
 575 */
 576void timer_interrupt(struct pt_regs * regs)
 577{
 
 
 578	struct pt_regs *old_regs;
 579	struct decrementer_clock *decrementer =  &__get_cpu_var(decrementers);
 580	struct clock_event_device *evt = &decrementer->event;
 581	u64 now;
 582
 
 
 
 
 
 
 
 
 
 583	/* Ensure a positive value is written to the decrementer, or else
 584	 * some CPUs will continue to take decrementer exceptions.
 
 
 
 585	 */
 586	set_dec(DECREMENTER_MAX);
 
 
 
 587
 588	/* Some implementations of hotplug will get timer interrupts while
 589	 * offline, just ignore these
 590	 */
 591	if (!cpu_online(smp_processor_id()))
 592		return;
 593
 594	trace_timer_interrupt_entry(regs);
 595
 596	__get_cpu_var(irq_stat).timer_irqs++;
 597
 598#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
 599	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 600		do_IRQ(regs);
 601#endif
 602
 603	old_regs = set_irq_regs(regs);
 604	irq_enter();
 
 605
 606	if (test_irq_work_pending()) {
 607		clear_irq_work_pending();
 608		irq_work_run();
 609	}
 610
 611#ifdef CONFIG_PPC_ISERIES
 612	if (firmware_has_feature(FW_FEATURE_ISERIES))
 613		get_lppaca()->int_dword.fields.decr_int = 0;
 614#endif
 615
 616	now = get_tb_or_rtc();
 617	if (now >= decrementer->next_tb) {
 618		decrementer->next_tb = ~(u64)0;
 619		if (evt->event_handler)
 620			evt->event_handler(evt);
 
 621	} else {
 622		now = decrementer->next_tb - now;
 623		if (now <= DECREMENTER_MAX)
 624			set_dec((int)now);
 
 
 
 
 625	}
 626
 627#ifdef CONFIG_PPC_ISERIES
 628	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
 629		process_hvlpevents();
 630#endif
 631
 632#ifdef CONFIG_PPC64
 633	/* collect purr register values often, for accurate calculations */
 634	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
 635		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
 636		cu->current_tb = mfspr(SPRN_PURR);
 637	}
 638#endif
 639
 640	irq_exit();
 641	set_irq_regs(old_regs);
 
 
 642
 643	trace_timer_interrupt_exit(regs);
 
 
 644}
 
 645
 646#ifdef CONFIG_SUSPEND
 647static void generic_suspend_disable_irqs(void)
 648{
 649	/* Disable the decrementer, so that it doesn't interfere
 650	 * with suspending.
 651	 */
 652
 653	set_dec(0x7fffffff);
 654	local_irq_disable();
 655	set_dec(0x7fffffff);
 656}
 657
 658static void generic_suspend_enable_irqs(void)
 659{
 660	local_irq_enable();
 661}
 662
 663/* Overrides the weak version in kernel/power/main.c */
 664void arch_suspend_disable_irqs(void)
 665{
 666	if (ppc_md.suspend_disable_irqs)
 667		ppc_md.suspend_disable_irqs();
 668	generic_suspend_disable_irqs();
 669}
 670
 671/* Overrides the weak version in kernel/power/main.c */
 672void arch_suspend_enable_irqs(void)
 673{
 674	generic_suspend_enable_irqs();
 675	if (ppc_md.suspend_enable_irqs)
 676		ppc_md.suspend_enable_irqs();
 677}
 678#endif
 679
 
 
 
 
 
 
 680/*
 681 * Scheduler clock - returns current time in nanosec units.
 682 *
 683 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 684 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 685 * are 64-bit unsigned numbers.
 686 */
 687unsigned long long sched_clock(void)
 688{
 689	if (__USE_RTC())
 690		return get_rtc();
 691	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
 692}
 693
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 694static int __init get_freq(char *name, int cells, unsigned long *val)
 695{
 696	struct device_node *cpu;
 697	const unsigned int *fp;
 698	int found = 0;
 699
 700	/* The cpu node should have timebase and clock frequency properties */
 701	cpu = of_find_node_by_type(NULL, "cpu");
 702
 703	if (cpu) {
 704		fp = of_get_property(cpu, name, NULL);
 705		if (fp) {
 706			found = 1;
 707			*val = of_read_ulong(fp, cells);
 708		}
 709
 710		of_node_put(cpu);
 711	}
 712
 713	return found;
 714}
 715
 716/* should become __cpuinit when secondary_cpu_time_init also is */
 717void start_cpu_decrementer(void)
 718{
 719#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 
 
 720	/* Clear any pending timer interrupts */
 721	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
 722
 723	/* Enable decrementer interrupt */
 724	mtspr(SPRN_TCR, TCR_DIE);
 725#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
 
 
 
 
 
 
 726}
 727
 728void __init generic_calibrate_decr(void)
 729{
 730	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
 731
 732	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
 733	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
 734
 735		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
 736				"(not found)\n");
 737	}
 738
 739	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */
 740
 741	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
 742	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
 743
 744		printk(KERN_ERR "WARNING: Estimating processor frequency "
 745				"(not found)\n");
 746	}
 747}
 748
 749int update_persistent_clock(struct timespec now)
 750{
 751	struct rtc_time tm;
 752
 753	if (!ppc_md.set_rtc_time)
 754		return 0;
 755
 756	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
 757	tm.tm_year -= 1900;
 758	tm.tm_mon -= 1;
 759
 760	return ppc_md.set_rtc_time(&tm);
 761}
 762
 763static void __read_persistent_clock(struct timespec *ts)
 764{
 765	struct rtc_time tm;
 766	static int first = 1;
 767
 768	ts->tv_nsec = 0;
 769	/* XXX this is a little fragile but will work okay in the short term */
 770	if (first) {
 771		first = 0;
 772		if (ppc_md.time_init)
 773			timezone_offset = ppc_md.time_init();
 774
 775		/* get_boot_time() isn't guaranteed to be safe to call late */
 776		if (ppc_md.get_boot_time) {
 777			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
 778			return;
 779		}
 780	}
 781	if (!ppc_md.get_rtc_time) {
 782		ts->tv_sec = 0;
 783		return;
 784	}
 785	ppc_md.get_rtc_time(&tm);
 786
 787	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
 788			    tm.tm_hour, tm.tm_min, tm.tm_sec);
 789}
 790
 791void read_persistent_clock(struct timespec *ts)
 792{
 793	__read_persistent_clock(ts);
 794
 795	/* Sanitize it in case the real time clock is set before the epoch */
 796	if (ts->tv_sec < 0) {
 797		ts->tv_sec = 0;
 798		ts->tv_nsec = 0;
 799	}
 800
 801}
 802
 803/* clocksource code */
 804static cycle_t rtc_read(struct clocksource *cs)
 805{
 806	return (cycle_t)get_rtc();
 807}
 808
 809static cycle_t timebase_read(struct clocksource *cs)
 810{
 811	return (cycle_t)get_tb();
 812}
 813
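/*
 * Sketch of how the timekeeping core consumes these readouts
 * (equivalent in spirit to the core's clocksource_cyc2ns() helper):
 * a cycle delta becomes nanoseconds via the mult/shift pair chosen in
 * clocksource_init() below.
 */
static inline u64 cycles_to_ns_sketch(struct clocksource *cs, cycle_t delta)
{
	return ((u64)delta * cs->mult) >> cs->shift;
}
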
 814void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 815			struct clocksource *clock, u32 mult)
 816{
 817	u64 new_tb_to_xs, new_stamp_xsec;
 818	u32 frac_sec;
 819
 820	if (clock != &clocksource_timebase)
 821		return;
 822
 823	/* Make userspace gettimeofday spin until we're done. */
 824	++vdso_data->tb_update_count;
 825	smp_mb();
 826
 827	/* XXX this assumes clock->shift == 22 */
 828	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
 829	new_tb_to_xs = (u64) mult * 4611686018ULL;
 830	new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
 831	do_div(new_stamp_xsec, 1000000000);
 832	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
 833
 834	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
 835	/* this is tv_nsec / 1e9 as a 0.32 fraction */
 836	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
 837
 838	/*
 839	 * tb_update_count is used to allow the userspace gettimeofday code
 840	 * to assure itself that it sees a consistent view of the tb_to_xs and
 841	 * stamp_xsec variables.  It reads the tb_update_count, then reads
 842	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
 843	 * the two values of tb_update_count match and are even then the
 844	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
 845	 * loops back and reads them again until this criterion is met.
 846	 * We expect the caller to have done the first increment of
 847	 * vdso_data->tb_update_count already.
 848	 */
 849	vdso_data->tb_orig_stamp = clock->cycle_last;
 850	vdso_data->stamp_xsec = new_stamp_xsec;
 851	vdso_data->tb_to_xs = new_tb_to_xs;
 852	vdso_data->wtom_clock_sec = wtm->tv_sec;
 853	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
 854	vdso_data->stamp_xtime = *wall_time;
 855	vdso_data->stamp_sec_fraction = frac_sec;
 856	smp_wmb();
 857	++(vdso_data->tb_update_count);
 858}
 859
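/*
 * Illustrative sketch of the reader side of the tb_update_count
 * protocol documented above.  The real consumer is the vDSO
 * gettimeofday code; this C rendition is an assumption, for
 * exposition only.
 */
static inline u32 vdso_read_begin_sketch(void)
{
	u32 seq;

	/* an odd count means an update is in flight: wait it out */
	while ((seq = vdso_data->tb_update_count) & 1)
		cpu_relax();
	smp_rmb();
	return seq;
}

static inline bool vdso_read_retry_sketch(u32 seq)
{
	smp_rmb();
	/* retry if an update started or finished while we were reading */
	return vdso_data->tb_update_count != seq;
}
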
 860void update_vsyscall_tz(void)
 861{
 862	/* Make userspace gettimeofday spin until we're done. */
 863	++vdso_data->tb_update_count;
 864	smp_mb();
 865	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
 866	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
 867	smp_mb();
 868	++vdso_data->tb_update_count;
 869}
 870
 871static void __init clocksource_init(void)
 872{
 873	struct clocksource *clock;
 874
 875	if (__USE_RTC())
 876		clock = &clocksource_rtc;
 877	else
 878		clock = &clocksource_timebase;
 879
 880	clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);
 881
 882	if (clocksource_register(clock)) {
 883		printk(KERN_ERR "clocksource: %s is already registered\n",
 884		       clock->name);
 885		return;
 886	}
 887
 888	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
 889	       clock->name, clock->mult, clock->shift);
 890}
 891
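/*
 * For reference, clocksource_hz2mult() above solves
 * (cycles * mult) >> shift == cycles * NSEC_PER_SEC / hz for mult,
 * i.e. mult = (10^9 << shift) / hz.  A hypothetical 512 MHz timebase
 * with shift == 22 gives mult = (1000000000 << 22) / 512000000
 * = 8192000 (0x7d0000), so each tick scales to exactly 1.953125 ns.
 */
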
 892static int decrementer_set_next_event(unsigned long evt,
 893				      struct clock_event_device *dev)
 894{
 895	__get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
 896	set_dec(evt);
 897	return 0;
 898}
 899
 900static void decrementer_set_mode(enum clock_event_mode mode,
 901				 struct clock_event_device *dev)
 902{
 903	if (mode != CLOCK_EVT_MODE_ONESHOT)
 904		decrementer_set_next_event(DECREMENTER_MAX, dev);
 905}
 906
 907static inline uint64_t div_sc64(unsigned long ticks, unsigned long nsec,
 908				int shift)
 909{
 910	uint64_t tmp = ((uint64_t)ticks) << shift;
 911
 912	do_div(tmp, nsec);
 913	return tmp;
 914}
 915
 916static void __init setup_clockevent_multiplier(unsigned long hz)
 917{
 918	u64 mult, shift = 32;
 919
 920	while (1) {
 921		mult = div_sc64(hz, NSEC_PER_SEC, shift);
 922		if (mult && (mult >> 32UL) == 0UL)
 923			break;
 924
 925		shift--;
 926	}
 927
 928	decrementer_clockevent.shift = shift;
 929	decrementer_clockevent.mult = mult;
 930}
 931
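/*
 * Worked example (hypothetical 512 MHz timebase): div_sc64() is first
 * tried with shift == 32, giving mult = (512000000 << 32) / 10^9
 * = 2199023255, which already fits in 32 bits, so the loop exits at
 * once.  The clockevents core then turns a request of N ns into
 * decrementer ticks as (N * mult) >> shift, e.g. 1 ms -> ~512000 ticks.
 */
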
 932static void register_decrementer_clockevent(int cpu)
 933{
 934	struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
 935
 936	*dec = decrementer_clockevent;
 937	dec->cpumask = cpumask_of(cpu);
 938
 939	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
 940		    dec->name, dec->mult, dec->shift, cpu);
 941
 942	clockevents_register_device(dec);
 943}
 944
 945static void __init init_decrementer_clockevent(void)
 946{
 947	int cpu = smp_processor_id();
 948
 949	setup_clockevent_multiplier(ppc_tb_freq);
 950	decrementer_clockevent.max_delta_ns =
 951		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
 952	decrementer_clockevent.min_delta_ns =
 953		clockevent_delta2ns(2, &decrementer_clockevent);
 954
 955	register_decrementer_clockevent(cpu);
 956}
 957
 958void secondary_cpu_time_init(void)
 959{
 960	/* Start the decrementer on CPUs that have manual control
 961	 * such as BookE
 962	 */
 963	start_cpu_decrementer();
 964
 965	/* FIXME: should make an unrelated change to move the
 966	 * snapshot_timebase call here! */
 967	register_decrementer_clockevent(smp_processor_id());
 968}
 969
 970/* This function is only called on the boot processor */
 971void __init time_init(void)
 972{
 973	struct div_result res;
 974	u64 scale;
 975	unsigned shift;
 976
 977	if (__USE_RTC()) {
 978		/* 601 processor: dec counts down by 128 every 128ns */
 979		ppc_tb_freq = 1000000000;
 980	} else {
 981		/* Normal PowerPC with timebase register */
 982		ppc_md.calibrate_decr();
 983		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
 984		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
 985		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
 986		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
 987	}
 988
 989	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
 990	tb_ticks_per_sec = ppc_tb_freq;
 991	tb_ticks_per_usec = ppc_tb_freq / 1000000;
 992	calc_cputime_factors();
 993	setup_cputime_one_jiffy();
 994
 995	/*
 996	 * Compute scale factor for sched_clock.
 997	 * The calibrate_decr() function has set tb_ticks_per_sec,
 998	 * which is the timebase frequency.
 999	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
1000	 * the 128-bit result as a 64.64 fixed-point number.
1001	 * We then shift that number right until it is less than 1.0,
1002	 * giving us the scale factor and shift count to use in
1003	 * sched_clock().
1004	 */
1005	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
1006	scale = res.result_low;
1007	for (shift = 0; res.result_high != 0; ++shift) {
1008		scale = (scale >> 1) | (res.result_high << 63);
1009		res.result_high >>= 1;
1010	}
1011	tb_to_ns_scale = scale;
1012	tb_to_ns_shift = shift;
1013	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
1014	boot_tb = get_tb_or_rtc();
1015
1016	/* If platform provided a timezone (pmac), we correct the time */
1017	if (timezone_offset) {
1018		sys_tz.tz_minuteswest = -timezone_offset / 60;
1019		sys_tz.tz_dsttime = 0;
1020	}
1021
1022	vdso_data->tb_update_count = 0;
1023	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
1024
1025	/* Start the decrementer on CPUs that have manual control
1026	 * such as BookE
1027	 */
1028	start_cpu_decrementer();
1029
1030	/* Register the clocksource, if we're not running on iSeries */
1031	if (!firmware_has_feature(FW_FEATURE_ISERIES))
1032		clocksource_init();
1033
1034	init_decrementer_clockevent();
1035}
1036
1037
1038#define FEBRUARY	2
1039#define	STARTOFTIME	1970
1040#define SECDAY		86400L
1041#define SECYR		(SECDAY * 365)
1042#define	leapyear(year)		((year) % 4 == 0 && \
1043				 ((year) % 100 != 0 || (year) % 400 == 0))
1044#define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
1045#define	days_in_month(a) 	(month_days[(a) - 1])
1046
1047static int month_days[12] = {
1048	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
1049};
1050
1051/*
1052 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
1053 */
1054void GregorianDay(struct rtc_time *tm)
1055{
1056	int leapsToDate;
1057	int lastYear;
1058	int day;
1059	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
1060
1061	lastYear = tm->tm_year - 1;
1062
1063	/*
1064	 * Number of leap corrections to apply up to end of last year
1065	 */
1066	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
1067
1068	/*
1069	 * This year is a leap year if it is divisible by 4 except when it is
1070	 * divisible by 100 unless it is divisible by 400
1071	 *
1072	 * e.g. 1904 was a leap year, 1900 was not, 1996 was, and 2000 was
1073	 */
1074	day = tm->tm_mon > 2 && leapyear(tm->tm_year);
1075
1076	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
1077		   tm->tm_mday;
1078
1079	tm->tm_wday = day % 7;
1080}
1081
1082void to_tm(int tim, struct rtc_time *tm)
1083{
1084	register int    i;
1085	register long   hms, day;
1086
1087	day = tim / SECDAY;
1088	hms = tim % SECDAY;
1089
1090	/* Hours, minutes, seconds are easy */
1091	tm->tm_hour = hms / 3600;
1092	tm->tm_min = (hms % 3600) / 60;
1093	tm->tm_sec = (hms % 3600) % 60;
1094
1095	/* Number of years in days */
1096	for (i = STARTOFTIME; day >= days_in_year(i); i++)
1097		day -= days_in_year(i);
1098	tm->tm_year = i;
1099
1100	/* Number of months in days left */
1101	if (leapyear(tm->tm_year))
1102		days_in_month(FEBRUARY) = 29;	/* NB: mutates month_days[]; not reentrant */
1103	for (i = 1; day >= days_in_month(i); i++)
1104		day -= days_in_month(i);
1105	days_in_month(FEBRUARY) = 28;
1106	tm->tm_mon = i;
1107
1108	/* Days are what is left over (+1) from all that. */
1109	tm->tm_mday = day + 1;
1110
1111	/*
1112	 * Determine the day of week
1113	 */
1114	GregorianDay(tm);
1115}
1116
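/*
 * Worked example: to_tm(0, &tm) decodes the epoch as 1970-01-01
 * 00:00:00 with tm_wday == 4 (a Thursday).  Note that, unlike the
 * userspace struct tm, tm_year here is the full year (1970, not 70)
 * and tm_mon is 1-based; update_persistent_clock() above compensates
 * for both.
 */
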
1117/*
1118 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
1119 * result.
1120 */
1121void div128_by_32(u64 dividend_high, u64 dividend_low,
1122		  unsigned divisor, struct div_result *dr)
1123{
1124	unsigned long a, b, c, d;
1125	unsigned long w, x, y, z;
1126	u64 ra, rb, rc;
1127
1128	a = dividend_high >> 32;
1129	b = dividend_high & 0xffffffff;
1130	c = dividend_low >> 32;
1131	d = dividend_low & 0xffffffff;
1132
1133	w = a / divisor;
1134	ra = ((u64)(a - (w * divisor)) << 32) + b;
1135
1136	rb = ((u64) do_div(ra, divisor) << 32) + c;
1137	x = ra;
1138
1139	rc = ((u64) do_div(rb, divisor) << 32) + d;
1140	y = rb;
1141
1142	do_div(rc, divisor);
1143	z = rc;
1144
1145	dr->result_high = ((u64)w << 32) + x;
1146	dr->result_low  = ((u64)y << 32) + z;
1147
1148}
1149
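/*
 * Illustrative check of div128_by_32(), mirroring its use in
 * time_init() with a hypothetical 512 MHz timebase (an example, not
 * called anywhere):
 */
static void __init __maybe_unused div128_by_32_example(void)
{
	struct div_result res;

	/* 10^9 * 2^64 / 512000000 == 1.953125 in 64.64 fixed point */
	div128_by_32(1000000000, 0, 512000000, &res);
	/* now res.result_high == 1, res.result_low == 0xf400000000000000 */
}
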
1150/* We don't need to calibrate delay; we use the CPU timebase for that */
1151void calibrate_delay(void)
1152{
1153	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
1154	 * as the number of __delay(1) calls in a jiffy, so make it so.
1155	 */
1156	loops_per_jiffy = tb_ticks_per_jiffy;
1157}
1158
1159static int __init rtc_init(void)
1160{
1161	struct platform_device *pdev;
1162
1163	if (!ppc_md.get_rtc_time)
1164		return -ENODEV;
1165
1166	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
1167	if (IS_ERR(pdev))
1168		return PTR_ERR(pdev);
1169
1170	return 0;
1171}
1172
1173module_init(rtc_init);