arch/powerpc/kernel/time.c (v6.2)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Common time routines among all ppc machines.
   4 *
   5 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
   6 * Paul Mackerras' version and mine for PReP and Pmac.
   7 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
   8 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
   9 *
  10 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
  11 * to make clock more stable (2.4.0-test5). The only thing
  12 * that this code assumes is that the timebases have been synchronized
  13 * by firmware on SMP and are never stopped (never do sleep
  14 * on SMP then, nap and doze are OK).
  15 * 
  16 * Speeded up do_gettimeofday by getting rid of references to
  17 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
  18 *
  19 * TODO (not necessarily in this file):
  20 * - improve precision and reproducibility of timebase frequency
  21 * measurement at boot time.
  22 * - for astronomical applications: add a new function to get
  23 * non ambiguous timestamps even around leap seconds. This needs
  24 * a new timestamp format and a good name.
  25 *
  26 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
  27 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
  28 */
  29
  30#include <linux/errno.h>
  31#include <linux/export.h>
  32#include <linux/sched.h>
  33#include <linux/sched/clock.h>
  34#include <linux/sched/cputime.h>
  35#include <linux/kernel.h>
  36#include <linux/param.h>
  37#include <linux/string.h>
  38#include <linux/mm.h>
  39#include <linux/interrupt.h>
  40#include <linux/timex.h>
  41#include <linux/kernel_stat.h>
  42#include <linux/time.h>
  43#include <linux/init.h>
  44#include <linux/profile.h>
  45#include <linux/cpu.h>
  46#include <linux/security.h>
  47#include <linux/percpu.h>
  48#include <linux/rtc.h>
  49#include <linux/jiffies.h>
  50#include <linux/posix-timers.h>
  51#include <linux/irq.h>
  52#include <linux/delay.h>
  53#include <linux/irq_work.h>
  54#include <linux/of_clk.h>
  55#include <linux/suspend.h>
  56#include <linux/processor.h>
  57#include <linux/mc146818rtc.h>
  58#include <linux/platform_device.h>
  59
  60#include <asm/trace.h>
  61#include <asm/interrupt.h>
  62#include <asm/io.h>
  63#include <asm/nvram.h>
  64#include <asm/cache.h>
  65#include <asm/machdep.h>
  66#include <linux/uaccess.h>
  67#include <asm/time.h>
  68#include <asm/irq.h>
  69#include <asm/div64.h>
  70#include <asm/smp.h>
  71#include <asm/vdso_datapage.h>
  72#include <asm/firmware.h>
  73#include <asm/mce.h>
  74
  75/* powerpc clocksource/clockevent code */
  76
  77#include <linux/clockchips.h>
  78#include <linux/timekeeper_internal.h>
  79
  80static u64 timebase_read(struct clocksource *);
  81static struct clocksource clocksource_timebase = {
  82	.name         = "timebase",
  83	.rating       = 400,
  84	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
  85	.mask         = CLOCKSOURCE_MASK(64),
  86	.read         = timebase_read,
  87	.vdso_clock_mode	= VDSO_CLOCKMODE_ARCHTIMER,
  88};
  89
  90#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
  91u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
  92EXPORT_SYMBOL_GPL(decrementer_max); /* for KVM HDEC */
  93
  94static int decrementer_set_next_event(unsigned long evt,
  95				      struct clock_event_device *dev);
  96static int decrementer_shutdown(struct clock_event_device *evt);
  97
  98struct clock_event_device decrementer_clockevent = {
  99	.name			= "decrementer",
 100	.rating			= 200,
 101	.irq			= 0,
 102	.set_next_event		= decrementer_set_next_event,
 103	.set_state_oneshot_stopped = decrementer_shutdown,
 104	.set_state_shutdown	= decrementer_shutdown,
 105	.tick_resume		= decrementer_shutdown,
 106	.features		= CLOCK_EVT_FEAT_ONESHOT |
 107				  CLOCK_EVT_FEAT_C3STOP,
 108};
 109EXPORT_SYMBOL(decrementer_clockevent);
 110
 111/*
 112 * This always puts next_tb beyond now, so the clock event will never fire
 113 * with the usual comparison, no need for a separate test for stopped.
 114 */
 115#define DEC_CLOCKEVENT_STOPPED ~0ULL
 116DEFINE_PER_CPU(u64, decrementers_next_tb) = DEC_CLOCKEVENT_STOPPED;
 117EXPORT_SYMBOL_GPL(decrementers_next_tb);
 118static DEFINE_PER_CPU(struct clock_event_device, decrementers);
 119
 120#define XSEC_PER_SEC (1024*1024)
 121
 122#ifdef CONFIG_PPC64
 123#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
 124#else
 125/* compute ((xsec << 12) * max) >> 32 */
 126#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
 127#endif
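/*
 * Worked check of the macro above: XSEC_PER_SEC is 2^20, so both
 * variants compute xsec * max / 2^20.  On 32-bit,
 * mulhwu((xsec) << 12, max) = ((xsec * 2^12) * max) >> 32
 *                           = xsec * max / 2^20.
 */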
 128
 129unsigned long tb_ticks_per_jiffy;
 130unsigned long tb_ticks_per_usec = 100; /* sane default */
 131EXPORT_SYMBOL(tb_ticks_per_usec);
 132unsigned long tb_ticks_per_sec;
 133EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime conversions */
 134
 135DEFINE_SPINLOCK(rtc_lock);
 136EXPORT_SYMBOL_GPL(rtc_lock);
 137
 138static u64 tb_to_ns_scale __read_mostly;
 139static unsigned tb_to_ns_shift __read_mostly;
 140static u64 boot_tb __read_mostly;
 141
 142extern struct timezone sys_tz;
 143static long timezone_offset;
 144
 145unsigned long ppc_proc_freq;
 146EXPORT_SYMBOL_GPL(ppc_proc_freq);
 147unsigned long ppc_tb_freq;
 148EXPORT_SYMBOL_GPL(ppc_tb_freq);
 149
 150bool tb_invalid;
 151
 152#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 153/*
 154 * Read the SPURR on systems that have it, otherwise the PURR,
 155 * or if that doesn't exist return the timebase value passed in.
 156 */
 157static inline unsigned long read_spurr(unsigned long tb)
 158{
 159	if (cpu_has_feature(CPU_FTR_SPURR))
 160		return mfspr(SPRN_SPURR);
 161	if (cpu_has_feature(CPU_FTR_PURR))
 162		return mfspr(SPRN_PURR);
 163	return tb;
 164}
 165
 166/*
 167 * Account time for a transition between system, hard irq
 168 * or soft irq state.
 169 */
 170static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
 171					unsigned long now, unsigned long stime)
 172{
 173	unsigned long stime_scaled = 0;
 174#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 175	unsigned long nowscaled, deltascaled;
 176	unsigned long utime, utime_scaled;
 177
 178	nowscaled = read_spurr(now);
 179	deltascaled = nowscaled - acct->startspurr;
 180	acct->startspurr = nowscaled;
 181	utime = acct->utime - acct->utime_sspurr;
 182	acct->utime_sspurr = acct->utime;
 183
 184	/*
 185	 * Because we don't read the SPURR on every kernel entry/exit,
 186	 * deltascaled includes both user and system SPURR ticks.
 187	 * Apportion these ticks to system SPURR ticks and user
 188	 * SPURR ticks in the same ratio as the system time (delta)
 189	 * and user time (udelta) values obtained from the timebase
 190	 * over the same interval.  The system ticks get accounted here;
 191	 * the user ticks get saved up in paca->user_time_scaled to be
 192	 * used by account_process_tick.
 193	 */
 194	stime_scaled = stime;
 195	utime_scaled = utime;
 196	if (deltascaled != stime + utime) {
 197		if (utime) {
 198			stime_scaled = deltascaled * stime / (stime + utime);
 199			utime_scaled = deltascaled - stime_scaled;
 200		} else {
 201			stime_scaled = deltascaled;
 202		}
 203	}
 204	acct->utime_scaled += utime_scaled;
 205#endif
 206
 207	return stime_scaled;
 208}
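/*
 * Worked example for the apportioning above (hypothetical numbers):
 * if the timebase measured stime = 30 and utime = 10 ticks while the
 * SPURR advanced deltascaled = 20 ticks (the thread ran at half speed),
 * then stime_scaled = 20 * 30 / 40 = 15 and utime_scaled = 20 - 15 = 5,
 * preserving the 3:1 system:user ratio.
 */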
 209
 210static unsigned long vtime_delta(struct cpu_accounting_data *acct,
 211				 unsigned long *stime_scaled,
 212				 unsigned long *steal_time)
 213{
 214	unsigned long now, stime;
 215
 216	WARN_ON_ONCE(!irqs_disabled());
 217
 218	now = mftb();
 219	stime = now - acct->starttime;
 220	acct->starttime = now;
 221
 222	*stime_scaled = vtime_delta_scaled(acct, now, stime);
 223
 224	if (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
 225			firmware_has_feature(FW_FEATURE_SPLPAR))
 226		*steal_time = pseries_calculate_stolen_time(now);
 227	else
 228		*steal_time = 0;
 229
 230	return stime;
 231}
 232
 233static void vtime_delta_kernel(struct cpu_accounting_data *acct,
 234			       unsigned long *stime, unsigned long *stime_scaled)
 235{
 236	unsigned long steal_time;
 237
 238	*stime = vtime_delta(acct, stime_scaled, &steal_time);
 239	*stime -= min(*stime, steal_time);
 240	acct->steal_time += steal_time;
 241}
 242
 243void vtime_account_kernel(struct task_struct *tsk)
 244{
 245	struct cpu_accounting_data *acct = get_accounting(tsk);
 246	unsigned long stime, stime_scaled;
 247
 248	vtime_delta_kernel(acct, &stime, &stime_scaled);
 249
 250	if (tsk->flags & PF_VCPU) {
 251		acct->gtime += stime;
 252#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 253		acct->utime_scaled += stime_scaled;
 254#endif
 255	} else {
 256		acct->stime += stime;
 257#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 258		acct->stime_scaled += stime_scaled;
 259#endif
 260	}
 261}
 262EXPORT_SYMBOL_GPL(vtime_account_kernel);
 263
 264void vtime_account_idle(struct task_struct *tsk)
 265{
 266	unsigned long stime, stime_scaled, steal_time;
 267	struct cpu_accounting_data *acct = get_accounting(tsk);
 268
 269	stime = vtime_delta(acct, &stime_scaled, &steal_time);
 270	acct->idle_time += stime + steal_time;
 271}
 272
 273static void vtime_account_irq_field(struct cpu_accounting_data *acct,
 274				    unsigned long *field)
 275{
 276	unsigned long stime, stime_scaled;
 277
 278	vtime_delta_kernel(acct, &stime, &stime_scaled);
 279	*field += stime;
 280#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 281	acct->stime_scaled += stime_scaled;
 282#endif
 283}
 284
 285void vtime_account_softirq(struct task_struct *tsk)
 286{
 287	struct cpu_accounting_data *acct = get_accounting(tsk);
 288	vtime_account_irq_field(acct, &acct->softirq_time);
 289}
 290
 291void vtime_account_hardirq(struct task_struct *tsk)
 292{
 293	struct cpu_accounting_data *acct = get_accounting(tsk);
 294	vtime_account_irq_field(acct, &acct->hardirq_time);
 295}
 296
 297static void vtime_flush_scaled(struct task_struct *tsk,
 298			       struct cpu_accounting_data *acct)
 299{
 300#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 301	if (acct->utime_scaled)
 302		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
 303	if (acct->stime_scaled)
 304		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
 305
 306	acct->utime_scaled = 0;
 307	acct->utime_sspurr = 0;
 308	acct->stime_scaled = 0;
 309#endif
 310}
 311
 312/*
 313 * Account the whole cputime accumulated in the paca
 314 * Must be called with interrupts disabled.
 315 * Assumes that vtime_account_kernel/idle() has been called
 316 * recently (i.e. since the last entry from usermode) so that
 317 * get_paca()->user_time_scaled is up to date.
 318 */
 319void vtime_flush(struct task_struct *tsk)
 320{
 321	struct cpu_accounting_data *acct = get_accounting(tsk);
 322
 323	if (acct->utime)
 324		account_user_time(tsk, cputime_to_nsecs(acct->utime));
 325
 326	if (acct->gtime)
 327		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
 328
 329	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
 330		account_steal_time(cputime_to_nsecs(acct->steal_time));
 331		acct->steal_time = 0;
 332	}
 333
 334	if (acct->idle_time)
 335		account_idle_time(cputime_to_nsecs(acct->idle_time));
 336
 337	if (acct->stime)
 338		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
 339					  CPUTIME_SYSTEM);
 340
 341	if (acct->hardirq_time)
 342		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
 343					  CPUTIME_IRQ);
 344	if (acct->softirq_time)
 345		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
 346					  CPUTIME_SOFTIRQ);
 347
 348	vtime_flush_scaled(tsk, acct);
 349
 350	acct->utime = 0;
 351	acct->gtime = 0;
 352	acct->idle_time = 0;
 353	acct->stime = 0;
 354	acct->hardirq_time = 0;
 355	acct->softirq_time = 0;
 356}
 357#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 358
 359void __delay(unsigned long loops)
 360{
 361	unsigned long start;
 362
 363	spin_begin();
 364	if (tb_invalid) {
 365		/*
 366		 * TB is in error state and isn't ticking anymore.
 367		 * HMI handler was unable to recover from TB error.
 368		 * Return immediately, so that kernel won't get stuck here.
 369		 */
 370		spin_cpu_relax();
 371	} else {
 372		start = mftb();
 373		while (mftb() - start < loops)
 374			spin_cpu_relax();
 375	}
 376	spin_end();
 377}
 378EXPORT_SYMBOL(__delay);
 379
 380void udelay(unsigned long usecs)
 381{
 382	__delay(tb_ticks_per_usec * usecs);
 383}
 384EXPORT_SYMBOL(udelay);
 385
 386#ifdef CONFIG_SMP
 387unsigned long profile_pc(struct pt_regs *regs)
 388{
 389	unsigned long pc = instruction_pointer(regs);
 390
 391	if (in_lock_functions(pc))
 392		return regs->link;
 393
 394	return pc;
 395}
 396EXPORT_SYMBOL(profile_pc);
 397#endif
 398
 399#ifdef CONFIG_IRQ_WORK
 400
 401/*
 402 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 403 */
 404#ifdef CONFIG_PPC64
 405static inline unsigned long test_irq_work_pending(void)
 406{
 407	unsigned long x;
 408
 409	asm volatile("lbz %0,%1(13)"
 410		: "=r" (x)
 411		: "i" (offsetof(struct paca_struct, irq_work_pending)));
 412	return x;
 413}
 414
 415static inline void set_irq_work_pending_flag(void)
 416{
 417	asm volatile("stb %0,%1(13)" : :
 418		"r" (1),
 419		"i" (offsetof(struct paca_struct, irq_work_pending)));
 420}
 421
 422static inline void clear_irq_work_pending(void)
 423{
 424	asm volatile("stb %0,%1(13)" : :
 425		"r" (0),
 426		"i" (offsetof(struct paca_struct, irq_work_pending)));
 427}
 428
 429#else /* 32-bit */
 430
 431DEFINE_PER_CPU(u8, irq_work_pending);
 432
 433#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
 434#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
 435#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
 436
 437#endif /* 32 vs 64 bit */
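/*
 * Note on the 64-bit variants above: r13 holds the PACA pointer for the
 * whole life of the kernel, so "lbz/stb %0,offset(13)" accesses this
 * CPU's irq_work_pending byte directly, with no need for the usual
 * this_cpu accessors.
 */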
 438
 439void arch_irq_work_raise(void)
 440{
 441	/*
 442	 * 64-bit code that uses irq soft-mask can just cause an immediate
 443	 * interrupt here that gets soft masked, if this is called under
 444	 * local_irq_disable(). It might be possible to prevent that happening
 445	 * by noticing interrupts are disabled and setting decrementer pending
 446	 * to be replayed when irqs are enabled. The problem there is that
 447	 * tracing can call irq_work_raise, including in code that does low
 448	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
 449	 * which could get tangled up if we're messing with the same state
 450	 * here.
 451	 */
 452	preempt_disable();
 453	set_irq_work_pending_flag();
 454	set_dec(1);
 455	preempt_enable();
 456}
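/*
 * set_dec(1) above arms the decrementer to expire almost immediately,
 * so the pending irq_work is noticed and run by the
 * test_irq_work_pending() check in timer_interrupt() below.
 */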
 457
 458static void set_dec_or_work(u64 val)
 459{
 460	set_dec(val);
 461	/* We may have raced with new irq work */
 462	if (unlikely(test_irq_work_pending()))
 463		set_dec(1);
 464}
 465
 466#else  /* CONFIG_IRQ_WORK */
 467
 468#define test_irq_work_pending()	0
 469#define clear_irq_work_pending()
 470
 471static void set_dec_or_work(u64 val)
 472{
 473	set_dec(val);
 474}
 475#endif /* CONFIG_IRQ_WORK */
 476
 477#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 478void timer_rearm_host_dec(u64 now)
 479{
 480	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 481
 482	WARN_ON_ONCE(!arch_irqs_disabled());
 483	WARN_ON_ONCE(mfmsr() & MSR_EE);
 484
 485	if (now >= *next_tb) {
 486		local_paca->irq_happened |= PACA_IRQ_DEC;
 487	} else {
 488		now = *next_tb - now;
 489		if (now > decrementer_max)
 490			now = decrementer_max;
 491		set_dec_or_work(now);
 492	}
 493}
 494EXPORT_SYMBOL_GPL(timer_rearm_host_dec);
 495#endif
 496
 497/*
 498 * timer_interrupt - gets called when the decrementer overflows,
 499 * with interrupts disabled.
 500 */
 501DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
 502{
 503	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
 504	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 505	struct pt_regs *old_regs;
 506	u64 now;
 507
 508	/*
 509	 * Some implementations of hotplug will get timer interrupts while
  510	 * offline; just ignore these.
 511	 */
 512	if (unlikely(!cpu_online(smp_processor_id()))) {
 513		set_dec(decrementer_max);
 514		return;
 515	}
 516
 517	/* Conditionally hard-enable interrupts. */
 518	if (should_hard_irq_enable(regs)) {
 519		/*
 520		 * Ensure a positive value is written to the decrementer, or
 521		 * else some CPUs will continue to take decrementer exceptions.
 522		 * When the PPC_WATCHDOG (decrementer based) is configured,
 523		 * keep this at most 31 bits, which is about 4 seconds on most
 524		 * systems, which gives the watchdog a chance of catching timer
 525		 * interrupt hard lockups.
 526		 */
 527		if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
 528			set_dec(0x7fffffff);
 529		else
 530			set_dec(decrementer_max);
 531
 532		do_hard_irq_enable();
 533	}
 534
 535#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
 536	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 537		__do_IRQ(regs);
 538#endif
 539
 540	old_regs = set_irq_regs(regs);
 541
 542	trace_timer_interrupt_entry(regs);
 543
 544	if (test_irq_work_pending()) {
 545		clear_irq_work_pending();
 546		mce_run_irq_context_handlers();
 547		irq_work_run();
 548	}
 549
 550	now = get_tb();
 551	if (now >= *next_tb) {
 552		evt->event_handler(evt);
 553		__this_cpu_inc(irq_stat.timer_irqs_event);
 554	} else {
 555		now = *next_tb - now;
 556		if (now > decrementer_max)
 557			now = decrementer_max;
 558		set_dec_or_work(now);
 559		__this_cpu_inc(irq_stat.timer_irqs_others);
 560	}
 561
 562	trace_timer_interrupt_exit(regs);
 563
 564	set_irq_regs(old_regs);
 565}
 566EXPORT_SYMBOL(timer_interrupt);
 567
 568#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 569void timer_broadcast_interrupt(void)
 570{
 571	tick_receive_broadcast();
 572	__this_cpu_inc(irq_stat.broadcast_irqs_event);
 573}
 574#endif
 575
 576#ifdef CONFIG_SUSPEND
 577/* Overrides the weak version in kernel/power/main.c */
 578void arch_suspend_disable_irqs(void)
 579{
 580	if (ppc_md.suspend_disable_irqs)
 581		ppc_md.suspend_disable_irqs();
 582
 583	/* Disable the decrementer, so that it doesn't interfere
 584	 * with suspending.
 585	 */
 586
 587	set_dec(decrementer_max);
 588	local_irq_disable();
 589	set_dec(decrementer_max);
 590}
 591
 592/* Overrides the weak version in kernel/power/main.c */
 593void arch_suspend_enable_irqs(void)
 594{
 595	local_irq_enable();
 596
 597	if (ppc_md.suspend_enable_irqs)
 598		ppc_md.suspend_enable_irqs();
 599}
 600#endif
 601
 602unsigned long long tb_to_ns(unsigned long long ticks)
 603{
 604	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
 605}
 606EXPORT_SYMBOL_GPL(tb_to_ns);
 607
 608/*
 609 * Scheduler clock - returns current time in nanosec units.
 610 *
 611 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 612 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 613 * are 64-bit unsigned numbers.
 614 */
 615notrace unsigned long long sched_clock(void)
 616{
 617	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
 618}
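/*
 * Worked example (assuming a hypothetical 512 MHz timebase): time_init()
 * below picks tb_to_ns_scale = 0xFA00000000000000 and tb_to_ns_shift = 1,
 * so that scale * 2^shift == 2^64 * 1e9 / 512000000.  mulhdu(ticks, scale)
 * << 1 then yields ticks * 1.953125, i.e. the tick count converted to
 * nanoseconds.
 */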
 619
 620
 621#ifdef CONFIG_PPC_PSERIES
 622
 623/*
 624 * Running clock - attempts to give a view of time passing for a virtualised
  625 * kernel.
 626 * Uses the VTB register if available otherwise a next best guess.
 627 */
 628unsigned long long running_clock(void)
 629{
 630	/*
 631	 * Don't read the VTB as a host since KVM does not switch in host
 632	 * timebase into the VTB when it takes a guest off the CPU, reading the
 633	 * VTB would result in reading 'last switched out' guest VTB.
 634	 *
  635 * Host kernels are often compiled with CONFIG_PPC_PSERIES enabled, so it
 636	 * would be unsafe to rely only on the #ifdef above.
 637	 */
 638	if (firmware_has_feature(FW_FEATURE_LPAR) &&
 639	    cpu_has_feature(CPU_FTR_ARCH_207S))
 640		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
 641
 642	/*
 643	 * This is a next best approximation without a VTB.
  644 * On a host which is running bare metal there should never be any stolen
  645 * time, and on a host which doesn't do any virtualisation TB *should* equal
  646 * the VTB, so it makes no difference anyway.
 647	 */
 648	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
 649}
 650#endif
 651
 652static int __init get_freq(char *name, int cells, unsigned long *val)
 653{
 654	struct device_node *cpu;
 655	const __be32 *fp;
 656	int found = 0;
 657
 658	/* The cpu node should have timebase and clock frequency properties */
 659	cpu = of_find_node_by_type(NULL, "cpu");
 660
 661	if (cpu) {
 662		fp = of_get_property(cpu, name, NULL);
 663		if (fp) {
 664			found = 1;
 665			*val = of_read_ulong(fp, cells);
 666		}
 667
 668		of_node_put(cpu);
 669	}
 670
 671	return found;
 672}
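/*
 * Illustration (hypothetical device tree values): a cpu node carrying the
 * properties read by the callers below might contain
 *
 *	timebase-frequency = <512000000>;                    // 1 cell
 *	ibm,extended-timebase-frequency = <0x0 0x1e848000>;  // 2 cells
 *
 * where 0x1e848000 == 512000000; of_read_ulong() folds the requested
 * number of cells into one unsigned long.
 */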
 673
 674static void start_cpu_decrementer(void)
 675{
 676#ifdef CONFIG_BOOKE_OR_40x
 677	unsigned int tcr;
 678
 679	/* Clear any pending timer interrupts */
 680	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
 681
 682	tcr = mfspr(SPRN_TCR);
 683	/*
 684	 * The watchdog may have already been enabled by u-boot. So leave
  685	 * TCR[WP] (Watchdog Period) alone.
 686	 */
 687	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
 688	tcr |= TCR_DIE;		/* Enable decrementer */
 689	mtspr(SPRN_TCR, tcr);
 690#endif
 691}
 692
 693void __init generic_calibrate_decr(void)
 694{
 695	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
 696
 697	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
 698	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
 699
 700		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
 701				"(not found)\n");
 702	}
 703
 704	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */
 705
 706	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
 707	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
 708
 709		printk(KERN_ERR "WARNING: Estimating processor frequency "
 710				"(not found)\n");
 711	}
 712}
 713
 714int update_persistent_clock64(struct timespec64 now)
 715{
 716	struct rtc_time tm;
 717
 718	if (!ppc_md.set_rtc_time)
 719		return -ENODEV;
 720
 721	rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);
 722
 723	return ppc_md.set_rtc_time(&tm);
 724}
 725
 726static void __read_persistent_clock(struct timespec64 *ts)
 727{
 728	struct rtc_time tm;
 729	static int first = 1;
 730
 731	ts->tv_nsec = 0;
 732	/* XXX this is a little fragile but will work okay in the short term */
 733	if (first) {
 734		first = 0;
 735		if (ppc_md.time_init)
 736			timezone_offset = ppc_md.time_init();
 737
 738		/* get_boot_time() isn't guaranteed to be safe to call late */
 739		if (ppc_md.get_boot_time) {
 740			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
 741			return;
 742		}
 743	}
 744	if (!ppc_md.get_rtc_time) {
 745		ts->tv_sec = 0;
 746		return;
 747	}
 748	ppc_md.get_rtc_time(&tm);
 749
 750	ts->tv_sec = rtc_tm_to_time64(&tm);
 751}
 752
 753void read_persistent_clock64(struct timespec64 *ts)
 754{
 755	__read_persistent_clock(ts);
 756
 757	/* Sanitize it in case real time clock is set below EPOCH */
 758	if (ts->tv_sec < 0) {
 759		ts->tv_sec = 0;
 760		ts->tv_nsec = 0;
 761	}
  762
 763}
 764
 765/* clocksource code */
 766static notrace u64 timebase_read(struct clocksource *cs)
 767{
 768	return (u64)get_tb();
 769}
 770
 771static void __init clocksource_init(void)
 772{
 773	struct clocksource *clock = &clocksource_timebase;
 774
 775	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
 776		printk(KERN_ERR "clocksource: %s is already registered\n",
 777		       clock->name);
 778		return;
 779	}
 780
 781	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
 782	       clock->name, clock->mult, clock->shift);
 783}
 784
 785static int decrementer_set_next_event(unsigned long evt,
 786				      struct clock_event_device *dev)
 787{
 788	__this_cpu_write(decrementers_next_tb, get_tb() + evt);
 789	set_dec_or_work(evt);
 790
 791	return 0;
 792}
 793
 794static int decrementer_shutdown(struct clock_event_device *dev)
 795{
 796	__this_cpu_write(decrementers_next_tb, DEC_CLOCKEVENT_STOPPED);
 797	set_dec_or_work(decrementer_max);
 798
 799	return 0;
 800}
 801
 802static void register_decrementer_clockevent(int cpu)
 803{
 804	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
 805
 806	*dec = decrementer_clockevent;
 807	dec->cpumask = cpumask_of(cpu);
 808
 809	clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);
 810
 811	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
 812		    dec->name, dec->mult, dec->shift, cpu);
 813
 814	/* Set values for KVM, see kvm_emulate_dec() */
 815	decrementer_clockevent.mult = dec->mult;
 816	decrementer_clockevent.shift = dec->shift;
 817}
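/*
 * clockevents_config_and_register() above derives mult/shift from
 * ppc_tb_freq and bounds the programmable range: with a hypothetical
 * 512 MHz timebase, the min_delta of 2 ticks is ~4 ns and a 32-bit
 * decrementer_max of 0x7fffffff ticks is ~4.19 s.
 */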
 818
 819static void enable_large_decrementer(void)
 820{
 821	if (!cpu_has_feature(CPU_FTR_ARCH_300))
 822		return;
 823
 824	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
 825		return;
 826
 827	/*
 828	 * If we're running as the hypervisor we need to enable the LD manually
 829	 * otherwise firmware should have done it for us.
 830	 */
 831	if (cpu_has_feature(CPU_FTR_HVMODE))
 832		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
 833}
 834
 835static void __init set_decrementer_max(void)
 836{
 837	struct device_node *cpu;
 838	u32 bits = 32;
 839
 840	/* Prior to ISAv3 the decrementer is always 32 bit */
 841	if (!cpu_has_feature(CPU_FTR_ARCH_300))
 842		return;
 843
 844	cpu = of_find_node_by_type(NULL, "cpu");
 845
 846	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
 847		if (bits > 64 || bits < 32) {
  848			pr_warn("time_init: firmware supplied invalid ibm,dec-bits\n");
 849			bits = 32;
 850		}
 851
 852		/* calculate the signed maximum given this many bits */
 853		decrementer_max = (1ul << (bits - 1)) - 1;
 854	}
 855
 856	of_node_put(cpu);
 857
 858	pr_info("time_init: %u bit decrementer (max: %llx)\n",
 859		bits, decrementer_max);
 860}
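/*
 * Example: a POWER9 device tree typically supplies ibm,dec-bits = <56>,
 * giving decrementer_max = (1ul << 55) - 1 = 0x7fffffffffffff instead of
 * the 32-bit default of 0x7fffffff.
 */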
 861
 862static void __init init_decrementer_clockevent(void)
 863{
 864	register_decrementer_clockevent(smp_processor_id());
 865}
 866
 867void secondary_cpu_time_init(void)
 868{
 869	/* Enable and test the large decrementer for this cpu */
 870	enable_large_decrementer();
 871
 872	/* Start the decrementer on CPUs that have manual control
 873	 * such as BookE
 874	 */
 875	start_cpu_decrementer();
 876
  877	/* FIXME: Should make unrelated change to move snapshot_timebase
  878	 * call here! */
 879	register_decrementer_clockevent(smp_processor_id());
 880}
 881
 882/* This function is only called on the boot processor */
 883void __init time_init(void)
 884{
 885	struct div_result res;
 886	u64 scale;
 887	unsigned shift;
 888
 889	/* Normal PowerPC with timebase register */
 890	ppc_md.calibrate_decr();
 891	printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
 892	       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
 893	printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
 894	       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
 895
 896	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
 897	tb_ticks_per_sec = ppc_tb_freq;
 898	tb_ticks_per_usec = ppc_tb_freq / 1000000;
 899
 900	/*
 901	 * Compute scale factor for sched_clock.
 902	 * The calibrate_decr() function has set tb_ticks_per_sec,
 903	 * which is the timebase frequency.
 904	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
 905	 * the 128-bit result as a 64.64 fixed-point number.
 906	 * We then shift that number right until it is less than 1.0,
 907	 * giving us the scale factor and shift count to use in
 908	 * sched_clock().
 909	 */
 910	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
 911	scale = res.result_low;
 912	for (shift = 0; res.result_high != 0; ++shift) {
 913		scale = (scale >> 1) | (res.result_high << 63);
 914		res.result_high >>= 1;
 915	}
 916	tb_to_ns_scale = scale;
 917	tb_to_ns_shift = shift;
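	/*
	 * Worked example (hypothetical 512 MHz timebase): the division
	 * above gives 1e9 * 2^64 / 512000000 = 1.953125 as a 64.64 value,
	 * i.e. res.result_high = 1, res.result_low = 0xF400000000000000.
	 * One pass of the loop then yields tb_to_ns_scale =
	 * 0xFA00000000000000 and tb_to_ns_shift = 1.
	 */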
 918	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
 919	boot_tb = get_tb();
 920
 921	/* If platform provided a timezone (pmac), we correct the time */
 922	if (timezone_offset) {
 923		sys_tz.tz_minuteswest = -timezone_offset / 60;
 924		sys_tz.tz_dsttime = 0;
 925	}
 926
 927	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
 928
 929	/* initialise and enable the large decrementer (if we have one) */
 930	set_decrementer_max();
 931	enable_large_decrementer();
 932
 933	/* Start the decrementer on CPUs that have manual control
 934	 * such as BookE
 935	 */
 936	start_cpu_decrementer();
 937
 938	/* Register the clocksource */
 939	clocksource_init();
 940
 941	init_decrementer_clockevent();
 942	tick_setup_hrtimer_broadcast();
 943
 944	of_clk_init(NULL);
 945	enable_sched_clock_irqtime();
 946}
 947
 948/*
 949 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 950 * result.
 951 */
 952void div128_by_32(u64 dividend_high, u64 dividend_low,
 953		  unsigned divisor, struct div_result *dr)
 954{
 955	unsigned long a, b, c, d;
 956	unsigned long w, x, y, z;
 957	u64 ra, rb, rc;
 958
 959	a = dividend_high >> 32;
 960	b = dividend_high & 0xffffffff;
 961	c = dividend_low >> 32;
 962	d = dividend_low & 0xffffffff;
 963
 964	w = a / divisor;
 965	ra = ((u64)(a - (w * divisor)) << 32) + b;
 966
 967	rb = ((u64) do_div(ra, divisor) << 32) + c;
 968	x = ra;
 969
 970	rc = ((u64) do_div(rb, divisor) << 32) + d;
 971	y = rb;
 972
 973	do_div(rc, divisor);
 974	z = rc;
 975
 976	dr->result_high = ((u64)w << 32) + x;
 977	dr->result_low  = ((u64)y << 32) + z;
 978
 979}
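/*
 * This is schoolbook long division in base 2^32: the dividend's four
 * 32-bit digits (a, b, c, d) are brought down one at a time, each
 * do_div() producing one quotient digit (w, x, y, z) and a remainder
 * that is carried into the next step.
 */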
 980
  981/* We don't need to calibrate delay; we use the CPU timebase for that */
 982void calibrate_delay(void)
 983{
  984	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
  985	 * as the number of __delay(1) calls in a jiffy, so make it so.
 986	 */
 987	loops_per_jiffy = tb_ticks_per_jiffy;
 988}
 989
 990#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
 991static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
 992{
 993	ppc_md.get_rtc_time(tm);
 994	return 0;
 995}
 996
 997static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
 998{
 999	if (!ppc_md.set_rtc_time)
1000		return -EOPNOTSUPP;
1001
1002	if (ppc_md.set_rtc_time(tm) < 0)
1003		return -EOPNOTSUPP;
1004
1005	return 0;
1006}
1007
1008static const struct rtc_class_ops rtc_generic_ops = {
1009	.read_time = rtc_generic_get_time,
1010	.set_time = rtc_generic_set_time,
1011};
1012
1013static int __init rtc_init(void)
1014{
1015	struct platform_device *pdev;
1016
1017	if (!ppc_md.get_rtc_time)
1018		return -ENODEV;
1019
1020	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
1021					     &rtc_generic_ops,
1022					     sizeof(rtc_generic_ops));
1023
1024	return PTR_ERR_OR_ZERO(pdev);
1025}
1026
1027device_initcall(rtc_init);
1028#endif
arch/powerpc/kernel/time.c (v3.15)
   1/*
   2 * Common time routines among all ppc machines.
   3 *
   4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
   5 * Paul Mackerras' version and mine for PReP and Pmac.
   6 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
   7 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
   8 *
   9 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
  10 * to make clock more stable (2.4.0-test5). The only thing
  11 * that this code assumes is that the timebases have been synchronized
  12 * by firmware on SMP and are never stopped (never do sleep
  13 * on SMP then, nap and doze are OK).
  14 * 
  15 * Speeded up do_gettimeofday by getting rid of references to
  16 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
  17 *
  18 * TODO (not necessarily in this file):
  19 * - improve precision and reproducibility of timebase frequency
  20 * measurement at boot time.
  21 * - for astronomical applications: add a new function to get
  22 * non ambiguous timestamps even around leap seconds. This needs
  23 * a new timestamp format and a good name.
  24 *
  25 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
  26 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
  27 *
  28 *      This program is free software; you can redistribute it and/or
  29 *      modify it under the terms of the GNU General Public License
  30 *      as published by the Free Software Foundation; either version
  31 *      2 of the License, or (at your option) any later version.
  32 */
  33
  34#include <linux/errno.h>
  35#include <linux/export.h>
  36#include <linux/sched.h>
  37#include <linux/kernel.h>
  38#include <linux/param.h>
  39#include <linux/string.h>
  40#include <linux/mm.h>
  41#include <linux/interrupt.h>
  42#include <linux/timex.h>
  43#include <linux/kernel_stat.h>
  44#include <linux/time.h>
  45#include <linux/clockchips.h>
  46#include <linux/init.h>
  47#include <linux/profile.h>
  48#include <linux/cpu.h>
  49#include <linux/security.h>
  50#include <linux/percpu.h>
  51#include <linux/rtc.h>
  52#include <linux/jiffies.h>
  53#include <linux/posix-timers.h>
  54#include <linux/irq.h>
  55#include <linux/delay.h>
  56#include <linux/irq_work.h>
  57#include <asm/trace.h>
  58
  59#include <asm/io.h>
  60#include <asm/processor.h>
  61#include <asm/nvram.h>
  62#include <asm/cache.h>
  63#include <asm/machdep.h>
  64#include <asm/uaccess.h>
  65#include <asm/time.h>
  66#include <asm/prom.h>
  67#include <asm/irq.h>
  68#include <asm/div64.h>
  69#include <asm/smp.h>
  70#include <asm/vdso_datapage.h>
  71#include <asm/firmware.h>
  72#include <asm/cputime.h>
  73
  74/* powerpc clocksource/clockevent code */
  75
  76#include <linux/clockchips.h>
  77#include <linux/timekeeper_internal.h>
  78
  79static cycle_t rtc_read(struct clocksource *);
  80static struct clocksource clocksource_rtc = {
  81	.name         = "rtc",
  82	.rating       = 400,
  83	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
  84	.mask         = CLOCKSOURCE_MASK(64),
  85	.read         = rtc_read,
  86};
  87
  88static cycle_t timebase_read(struct clocksource *);
  89static struct clocksource clocksource_timebase = {
  90	.name         = "timebase",
  91	.rating       = 400,
  92	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
  93	.mask         = CLOCKSOURCE_MASK(64),
  94	.read         = timebase_read,
  95};
  96
  97#define DECREMENTER_MAX	0x7fffffff
  98
  99static int decrementer_set_next_event(unsigned long evt,
 100				      struct clock_event_device *dev);
 101static void decrementer_set_mode(enum clock_event_mode mode,
 102				 struct clock_event_device *dev);
 103
 104struct clock_event_device decrementer_clockevent = {
 105	.name           = "decrementer",
 106	.rating         = 200,
 107	.irq            = 0,
 108	.set_next_event = decrementer_set_next_event,
 109	.set_mode       = decrementer_set_mode,
 110	.features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
 111};
 112EXPORT_SYMBOL(decrementer_clockevent);
 113
 114DEFINE_PER_CPU(u64, decrementers_next_tb);
 115static DEFINE_PER_CPU(struct clock_event_device, decrementers);
 116
 117#define XSEC_PER_SEC (1024*1024)
 118
 119#ifdef CONFIG_PPC64
 120#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
 121#else
 122/* compute ((xsec << 12) * max) >> 32 */
 123#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
 124#endif
 125
 126unsigned long tb_ticks_per_jiffy;
 127unsigned long tb_ticks_per_usec = 100; /* sane default */
 128EXPORT_SYMBOL(tb_ticks_per_usec);
 129unsigned long tb_ticks_per_sec;
 130EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
 131
 132DEFINE_SPINLOCK(rtc_lock);
 133EXPORT_SYMBOL_GPL(rtc_lock);
 134
 135static u64 tb_to_ns_scale __read_mostly;
 136static unsigned tb_to_ns_shift __read_mostly;
 137static u64 boot_tb __read_mostly;
 138
 139extern struct timezone sys_tz;
 140static long timezone_offset;
 141
 142unsigned long ppc_proc_freq;
 143EXPORT_SYMBOL_GPL(ppc_proc_freq);
 144unsigned long ppc_tb_freq;
 145EXPORT_SYMBOL_GPL(ppc_tb_freq);
 146
 147#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 148/*
 149 * Factors for converting from cputime_t (timebase ticks) to
 150 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
 151 * These are all stored as 0.64 fixed-point binary fractions.
 152 */
 153u64 __cputime_jiffies_factor;
 154EXPORT_SYMBOL(__cputime_jiffies_factor);
 155u64 __cputime_usec_factor;
 156EXPORT_SYMBOL(__cputime_usec_factor);
 157u64 __cputime_sec_factor;
 158EXPORT_SYMBOL(__cputime_sec_factor);
 159u64 __cputime_clockt_factor;
 160EXPORT_SYMBOL(__cputime_clockt_factor);
 161DEFINE_PER_CPU(unsigned long, cputime_last_delta);
 162DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
 163
 164cputime_t cputime_one_jiffy;
 165
 166void (*dtl_consumer)(struct dtl_entry *, u64);
 167
 168static void calc_cputime_factors(void)
 169{
 170	struct div_result res;
 171
 172	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
 173	__cputime_jiffies_factor = res.result_low;
 174	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
 175	__cputime_usec_factor = res.result_low;
 176	div128_by_32(1, 0, tb_ticks_per_sec, &res);
 177	__cputime_sec_factor = res.result_low;
 178	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
 179	__cputime_clockt_factor = res.result_low;
 180}
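/*
 * Worked example (hypothetical HZ = 100, tb_ticks_per_sec = 512000000):
 * __cputime_jiffies_factor = 100 * 2^64 / 512e6, a 0.64 fixed-point
 * fraction, so (ticks * factor) >> 64 = ticks * 100 / 512e6, i.e.
 * timebase ticks converted to jiffies; the usec, sec and clock_t
 * factors work the same way.
 */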
 181
 182/*
 183 * Read the SPURR on systems that have it, otherwise the PURR,
 184 * or if that doesn't exist return the timebase value passed in.
 185 */
 186static u64 read_spurr(u64 tb)
 187{
 188	if (cpu_has_feature(CPU_FTR_SPURR))
 189		return mfspr(SPRN_SPURR);
 190	if (cpu_has_feature(CPU_FTR_PURR))
 191		return mfspr(SPRN_PURR);
 192	return tb;
 193}
 194
 195#ifdef CONFIG_PPC_SPLPAR
 196
 197/*
 198 * Scan the dispatch trace log and count up the stolen time.
 199 * Should be called with interrupts disabled.
 200 */
 201static u64 scan_dispatch_log(u64 stop_tb)
 202{
 203	u64 i = local_paca->dtl_ridx;
 204	struct dtl_entry *dtl = local_paca->dtl_curr;
 205	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
 206	struct lppaca *vpa = local_paca->lppaca_ptr;
 207	u64 tb_delta;
 208	u64 stolen = 0;
 209	u64 dtb;
 210
 211	if (!dtl)
 212		return 0;
 213
 214	if (i == be64_to_cpu(vpa->dtl_idx))
 215		return 0;
 216	while (i < be64_to_cpu(vpa->dtl_idx)) {
 217		dtb = be64_to_cpu(dtl->timebase);
 218		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
 219			be32_to_cpu(dtl->ready_to_enqueue_time);
 220		barrier();
 221		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
 222			/* buffer has overflowed */
 223			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
 224			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
 225			continue;
 226		}
 227		if (dtb > stop_tb)
 228			break;
 229		if (dtl_consumer)
 230			dtl_consumer(dtl, i);
 231		stolen += tb_delta;
 232		++i;
 233		++dtl;
 234		if (dtl == dtl_end)
 235			dtl = local_paca->dispatch_log;
 236	}
 237	local_paca->dtl_ridx = i;
 238	local_paca->dtl_curr = dtl;
 239	return stolen;
 240}
 241
 242/*
 243 * Accumulate stolen time by scanning the dispatch trace log.
 244 * Called on entry from user mode.
 245 */
 246void accumulate_stolen_time(void)
 247{
 248	u64 sst, ust;
 249
 250	u8 save_soft_enabled = local_paca->soft_enabled;
 251
 252	/* We are called early in the exception entry, before
 253	 * soft/hard_enabled are sync'ed to the expected state
 254	 * for the exception. We are hard disabled but the PACA
 255	 * needs to reflect that so various debug stuff doesn't
 256	 * complain
 257	 */
 258	local_paca->soft_enabled = 0;
 259
 260	sst = scan_dispatch_log(local_paca->starttime_user);
 261	ust = scan_dispatch_log(local_paca->starttime);
 262	local_paca->system_time -= sst;
 263	local_paca->user_time -= ust;
 264	local_paca->stolen_time += ust + sst;
 265
 266	local_paca->soft_enabled = save_soft_enabled;
 267}
 268
 269static inline u64 calculate_stolen_time(u64 stop_tb)
 270{
 271	u64 stolen = 0;
 272
 273	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
 274		stolen = scan_dispatch_log(stop_tb);
 275		get_paca()->system_time -= stolen;
 276	}
 277
 278	stolen += get_paca()->stolen_time;
 279	get_paca()->stolen_time = 0;
 280	return stolen;
 281}
 282
 283#else /* CONFIG_PPC_SPLPAR */
 284static inline u64 calculate_stolen_time(u64 stop_tb)
 285{
 286	return 0;
 287}
 288
 289#endif /* CONFIG_PPC_SPLPAR */
 290
 291/*
 292 * Account time for a transition between system, hard irq
 293 * or soft irq state.
 294 */
 295static u64 vtime_delta(struct task_struct *tsk,
 296			u64 *sys_scaled, u64 *stolen)
 297{
 298	u64 now, nowscaled, deltascaled;
 299	u64 udelta, delta, user_scaled;
 300
 301	WARN_ON_ONCE(!irqs_disabled());
 302
 303	now = mftb();
 304	nowscaled = read_spurr(now);
 305	get_paca()->system_time += now - get_paca()->starttime;
 306	get_paca()->starttime = now;
 307	deltascaled = nowscaled - get_paca()->startspurr;
 308	get_paca()->startspurr = nowscaled;
 309
 310	*stolen = calculate_stolen_time(now);
 311
 312	delta = get_paca()->system_time;
 313	get_paca()->system_time = 0;
 314	udelta = get_paca()->user_time - get_paca()->utime_sspurr;
 315	get_paca()->utime_sspurr = get_paca()->user_time;
 316
 317	/*
 318	 * Because we don't read the SPURR on every kernel entry/exit,
 319	 * deltascaled includes both user and system SPURR ticks.
 320	 * Apportion these ticks to system SPURR ticks and user
 321	 * SPURR ticks in the same ratio as the system time (delta)
 322	 * and user time (udelta) values obtained from the timebase
 323	 * over the same interval.  The system ticks get accounted here;
 324	 * the user ticks get saved up in paca->user_time_scaled to be
 325	 * used by account_process_tick.
 326	 */
 327	*sys_scaled = delta;
 328	user_scaled = udelta;
 329	if (deltascaled != delta + udelta) {
 330		if (udelta) {
 331			*sys_scaled = deltascaled * delta / (delta + udelta);
 332			user_scaled = deltascaled - *sys_scaled;
 333		} else {
 334			*sys_scaled = deltascaled;
 335		}
 336	}
 337	get_paca()->user_time_scaled += user_scaled;
 338
 339	return delta;
 340}
 341
 342void vtime_account_system(struct task_struct *tsk)
 343{
 344	u64 delta, sys_scaled, stolen;
 345
 346	delta = vtime_delta(tsk, &sys_scaled, &stolen);
 347	account_system_time(tsk, 0, delta, sys_scaled);
 348	if (stolen)
 349		account_steal_time(stolen);
 350}
 351EXPORT_SYMBOL_GPL(vtime_account_system);
 352
 353void vtime_account_idle(struct task_struct *tsk)
 354{
 355	u64 delta, sys_scaled, stolen;
 356
 357	delta = vtime_delta(tsk, &sys_scaled, &stolen);
 358	account_idle_time(delta + stolen);
 359}
 360
 361/*
 362 * Transfer the user time accumulated in the paca
 363 * by the exception entry and exit code to the generic
 364 * process user time records.
 365 * Must be called with interrupts disabled.
 366 * Assumes that vtime_account_system/idle() has been called
 367 * recently (i.e. since the last entry from usermode) so that
 368 * get_paca()->user_time_scaled is up to date.
 369 */
 370void vtime_account_user(struct task_struct *tsk)
 371{
 372	cputime_t utime, utimescaled;
 373
 374	utime = get_paca()->user_time;
 375	utimescaled = get_paca()->user_time_scaled;
 376	get_paca()->user_time = 0;
 377	get_paca()->user_time_scaled = 0;
 378	get_paca()->utime_sspurr = 0;
 379	account_user_time(tsk, utime, utimescaled);
 380}
 381
 382#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 383#define calc_cputime_factors()
 384#endif
 385
 386void __delay(unsigned long loops)
 387{
 388	unsigned long start;
 389	int diff;
 390
 391	if (__USE_RTC()) {
 392		start = get_rtcl();
 393		do {
 394			/* the RTCL register wraps at 1000000000 */
 395			diff = get_rtcl() - start;
 396			if (diff < 0)
 397				diff += 1000000000;
 398		} while (diff < loops);
 399	} else {
 400		start = get_tbl();
 401		while (get_tbl() - start < loops)
 402			HMT_low();
 403		HMT_medium();
 404	}
 405}
 406EXPORT_SYMBOL(__delay);
 407
 408void udelay(unsigned long usecs)
 409{
 410	__delay(tb_ticks_per_usec * usecs);
 411}
 412EXPORT_SYMBOL(udelay);
 413
 414#ifdef CONFIG_SMP
 415unsigned long profile_pc(struct pt_regs *regs)
 416{
 417	unsigned long pc = instruction_pointer(regs);
 418
 419	if (in_lock_functions(pc))
 420		return regs->link;
 421
 422	return pc;
 423}
 424EXPORT_SYMBOL(profile_pc);
 425#endif
 426
 427#ifdef CONFIG_IRQ_WORK
 428
 429/*
 430 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 431 */
 432#ifdef CONFIG_PPC64
 433static inline unsigned long test_irq_work_pending(void)
 434{
 435	unsigned long x;
 436
 437	asm volatile("lbz %0,%1(13)"
 438		: "=r" (x)
 439		: "i" (offsetof(struct paca_struct, irq_work_pending)));
 440	return x;
 441}
 442
 443static inline void set_irq_work_pending_flag(void)
 444{
 445	asm volatile("stb %0,%1(13)" : :
 446		"r" (1),
 447		"i" (offsetof(struct paca_struct, irq_work_pending)));
 448}
 449
 450static inline void clear_irq_work_pending(void)
 451{
 452	asm volatile("stb %0,%1(13)" : :
 453		"r" (0),
 454		"i" (offsetof(struct paca_struct, irq_work_pending)));
 455}
 456
 457#else /* 32-bit */
 458
 459DEFINE_PER_CPU(u8, irq_work_pending);
 460
 461#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
 462#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
 463#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0
 464
 465#endif /* 32 vs 64 bit */
 466
 467void arch_irq_work_raise(void)
 468{
 469	preempt_disable();
 470	set_irq_work_pending_flag();
 471	set_dec(1);
 472	preempt_enable();
 473}
 474
 475#else  /* CONFIG_IRQ_WORK */
 476
 477#define test_irq_work_pending()	0
 478#define clear_irq_work_pending()
 479
 480#endif /* CONFIG_IRQ_WORK */
 481
 482void __timer_interrupt(void)
 483{
 484	struct pt_regs *regs = get_irq_regs();
 485	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
 486	struct clock_event_device *evt = &__get_cpu_var(decrementers);
 487	u64 now;
 488
 489	trace_timer_interrupt_entry(regs);
 490
 491	if (test_irq_work_pending()) {
 492		clear_irq_work_pending();
 493		irq_work_run();
 494	}
 495
 496	now = get_tb_or_rtc();
 497	if (now >= *next_tb) {
 498		*next_tb = ~(u64)0;
 499		if (evt->event_handler)
 500			evt->event_handler(evt);
 501		__get_cpu_var(irq_stat).timer_irqs_event++;
 502	} else {
 503		now = *next_tb - now;
 504		if (now <= DECREMENTER_MAX)
 505			set_dec((int)now);
 506		/* We may have raced with new irq work */
 507		if (test_irq_work_pending())
 508			set_dec(1);
 509		__get_cpu_var(irq_stat).timer_irqs_others++;
 510	}
 511
 512#ifdef CONFIG_PPC64
 513	/* collect purr register values often, for accurate calculations */
 514	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
 515		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
 516		cu->current_tb = mfspr(SPRN_PURR);
 517	}
 518#endif
 519
 520	trace_timer_interrupt_exit(regs);
 521}
 522
 523/*
 524 * timer_interrupt - gets called when the decrementer overflows,
 525 * with interrupts disabled.
 526 */
 527void timer_interrupt(struct pt_regs * regs)
 528{
 529	struct pt_regs *old_regs;
 530	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
 531
 532	/* Ensure a positive value is written to the decrementer, or else
 533	 * some CPUs will continue to take decrementer exceptions.
 534	 */
 535	set_dec(DECREMENTER_MAX);
 536
  537	/* Some implementations of hotplug will get timer interrupts while
  538	 * offline; just ignore these. We also need to set
  539	 * decrementers_next_tb to MAX to make sure __check_irq_replay
  540	 * doesn't replay the timer interrupt on return, otherwise we'll
  541	 * trap here infinitely :(
  542	 */
 543	if (!cpu_online(smp_processor_id())) {
 544		*next_tb = ~(u64)0;
 545		return;
 546	}
 547
 548	/* Conditionally hard-enable interrupts now that the DEC has been
 549	 * bumped to its maximum value
 550	 */
 551	may_hard_irq_enable();
 552
 553
 554#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
 555	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 556		do_IRQ(regs);
 557#endif
 558
 559	old_regs = set_irq_regs(regs);
 560	irq_enter();
 561
 562	__timer_interrupt();
 563	irq_exit();
 564	set_irq_regs(old_regs);
 565}
 566
 567/*
 568 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
 569 * left pending on exit from a KVM guest.  We don't need to do anything
 570 * to clear them, as they are edge-triggered.
 571 */
 572void hdec_interrupt(struct pt_regs *regs)
 573{
 574}
 575
 576#ifdef CONFIG_SUSPEND
 577static void generic_suspend_disable_irqs(void)
 578{
 579	/* Disable the decrementer, so that it doesn't interfere
 580	 * with suspending.
 581	 */
 582
 583	set_dec(DECREMENTER_MAX);
 584	local_irq_disable();
 585	set_dec(DECREMENTER_MAX);
 586}
 587
 588static void generic_suspend_enable_irqs(void)
 589{
 590	local_irq_enable();
 591}
 592
 593/* Overrides the weak version in kernel/power/main.c */
 594void arch_suspend_disable_irqs(void)
 595{
 596	if (ppc_md.suspend_disable_irqs)
 597		ppc_md.suspend_disable_irqs();
 598	generic_suspend_disable_irqs();
 599}
 600
 601/* Overrides the weak version in kernel/power/main.c */
 602void arch_suspend_enable_irqs(void)
 603{
 604	generic_suspend_enable_irqs();
 605	if (ppc_md.suspend_enable_irqs)
 606		ppc_md.suspend_enable_irqs();
 607}
 608#endif
 609
 610/*
 611 * Scheduler clock - returns current time in nanosec units.
 612 *
 613 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 614 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 615 * are 64-bit unsigned numbers.
 616 */
 617unsigned long long sched_clock(void)
 618{
 619	if (__USE_RTC())
 620		return get_rtc();
 621	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
 622}
 623
 624static int __init get_freq(char *name, int cells, unsigned long *val)
 625{
 626	struct device_node *cpu;
 627	const __be32 *fp;
 628	int found = 0;
 629
 630	/* The cpu node should have timebase and clock frequency properties */
 631	cpu = of_find_node_by_type(NULL, "cpu");
 632
 633	if (cpu) {
 634		fp = of_get_property(cpu, name, NULL);
 635		if (fp) {
 636			found = 1;
 637			*val = of_read_ulong(fp, cells);
 638		}
 639
 640		of_node_put(cpu);
 641	}
 642
 643	return found;
 644}
 645
 646void start_cpu_decrementer(void)
 647{
 648#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 649	/* Clear any pending timer interrupts */
 650	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
 651
 652	/* Enable decrementer interrupt */
 653	mtspr(SPRN_TCR, TCR_DIE);
 654#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
 655}
 656
 657void __init generic_calibrate_decr(void)
 658{
 659	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
 660
 661	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
 662	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
 663
 664		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
 665				"(not found)\n");
 666	}
 667
 668	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */
 669
 670	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
 671	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
 672
 673		printk(KERN_ERR "WARNING: Estimating processor frequency "
 674				"(not found)\n");
 675	}
 676}
 677
 678int update_persistent_clock(struct timespec now)
 679{
 680	struct rtc_time tm;
 681
 682	if (!ppc_md.set_rtc_time)
 683		return -ENODEV;
 684
 685	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
 686	tm.tm_year -= 1900;
 687	tm.tm_mon -= 1;
 688
 689	return ppc_md.set_rtc_time(&tm);
 690}
 691
 692static void __read_persistent_clock(struct timespec *ts)
 693{
 694	struct rtc_time tm;
 695	static int first = 1;
 696
 697	ts->tv_nsec = 0;
  698	/* XXX this is a little fragile but will work okay in the short term */
 699	if (first) {
 700		first = 0;
 701		if (ppc_md.time_init)
 702			timezone_offset = ppc_md.time_init();
 703
 704		/* get_boot_time() isn't guaranteed to be safe to call late */
 705		if (ppc_md.get_boot_time) {
 706			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
 707			return;
 708		}
 709	}
 710	if (!ppc_md.get_rtc_time) {
 711		ts->tv_sec = 0;
 712		return;
 713	}
 714	ppc_md.get_rtc_time(&tm);
 715
 716	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
 717			    tm.tm_hour, tm.tm_min, tm.tm_sec);
 718}
 719
 720void read_persistent_clock(struct timespec *ts)
 721{
 722	__read_persistent_clock(ts);
 723
 724	/* Sanitize it in case real time clock is set below EPOCH */
 725	if (ts->tv_sec < 0) {
 726		ts->tv_sec = 0;
 727		ts->tv_nsec = 0;
 728	}
  729
 730}
 731
 732/* clocksource code */
 733static cycle_t rtc_read(struct clocksource *cs)
 734{
 735	return (cycle_t)get_rtc();
 736}
 737
 738static cycle_t timebase_read(struct clocksource *cs)
 739{
 740	return (cycle_t)get_tb();
 741}
 742
 743void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
 744			struct clocksource *clock, u32 mult)
 745{
 746	u64 new_tb_to_xs, new_stamp_xsec;
 747	u32 frac_sec;
 748
 749	if (clock != &clocksource_timebase)
 750		return;
 751
 752	/* Make userspace gettimeofday spin until we're done. */
 753	++vdso_data->tb_update_count;
 754	smp_mb();
 755
 756	/* 19342813113834067 ~= 2^(20+64) / 1e9 */
 757	new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
 758	new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
 759	do_div(new_stamp_xsec, 1000000000);
 760	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
 761
 762	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
 763	/* this is tv_nsec / 1e9 as a 0.32 fraction */
 764	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
 765
 766	/*
 767	 * tb_update_count is used to allow the userspace gettimeofday code
 768	 * to assure itself that it sees a consistent view of the tb_to_xs and
 769	 * stamp_xsec variables.  It reads the tb_update_count, then reads
 770	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
 771	 * the two values of tb_update_count match and are even then the
 772	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
 773	 * loops back and reads them again until this criteria is met.
 774	 * We expect the caller to have done the first increment of
 775	 * vdso_data->tb_update_count already.
 776	 */
 777	vdso_data->tb_orig_stamp = clock->cycle_last;
 778	vdso_data->stamp_xsec = new_stamp_xsec;
 779	vdso_data->tb_to_xs = new_tb_to_xs;
 780	vdso_data->wtom_clock_sec = wtm->tv_sec;
 781	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
 782	vdso_data->stamp_xtime = *wall_time;
 783	vdso_data->stamp_sec_fraction = frac_sec;
 784	smp_wmb();
 785	++(vdso_data->tb_update_count);
 786}
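/*
 * Check of the constants above: 2^84 / 1e9 = 19342813113834066.8..., so
 * new_tb_to_xs ~= (mult / 2^shift) * 2^84 / 1e9; since mult / 2^shift
 * converts timebase ticks to nanoseconds, tb * tb_to_xs >> 64 comes out
 * in xsec (2^-20 s) units.  Likewise 18446744073 ~= 2^64 / 1e9, making
 * frac_sec equal tv_nsec / 1e9 as a 0.32 binary fraction.
 */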
 787
 788void update_vsyscall_tz(void)
 789{
 790	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
 791	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
 792}
 793
 794static void __init clocksource_init(void)
 795{
 796	struct clocksource *clock;
 797
 798	if (__USE_RTC())
 799		clock = &clocksource_rtc;
 800	else
 801		clock = &clocksource_timebase;
 802
 803	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
 804		printk(KERN_ERR "clocksource: %s is already registered\n",
 805		       clock->name);
 806		return;
 807	}
 808
 809	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
 810	       clock->name, clock->mult, clock->shift);
 811}
 812
 813static int decrementer_set_next_event(unsigned long evt,
 814				      struct clock_event_device *dev)
 815{
 816	__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
 817	set_dec(evt);
 818
 819	/* We may have raced with new irq work */
 820	if (test_irq_work_pending())
 821		set_dec(1);
 822
 823	return 0;
 824}
 825
 826static void decrementer_set_mode(enum clock_event_mode mode,
 827				 struct clock_event_device *dev)
 828{
 829	if (mode != CLOCK_EVT_MODE_ONESHOT)
 830		decrementer_set_next_event(DECREMENTER_MAX, dev);
 831}
 832
 833/* Interrupt handler for the timer broadcast IPI */
 834void tick_broadcast_ipi_handler(void)
 835{
 836	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
 837
 838	*next_tb = get_tb_or_rtc();
 839	__timer_interrupt();
 840}
 841
 842static void register_decrementer_clockevent(int cpu)
 843{
 844	struct clock_event_device *dec = &per_cpu(decrementers, cpu);
 845
 846	*dec = decrementer_clockevent;
 847	dec->cpumask = cpumask_of(cpu);
 848
 849	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
 850		    dec->name, dec->mult, dec->shift, cpu);
 851
 852	clockevents_register_device(dec);
 853}
 854
 855static void __init init_decrementer_clockevent(void)
 856{
 857	int cpu = smp_processor_id();
 858
 859	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);
 860
 861	decrementer_clockevent.max_delta_ns =
 862		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
 863	decrementer_clockevent.min_delta_ns =
 864		clockevent_delta2ns(2, &decrementer_clockevent);
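	/*
	 * Note: the last argument to clockevents_calc_mult_shift() above
	 * appears to be the span, in seconds, over which the computed
	 * mult/shift conversion must stay accurate (4 s here); the 2-tick
	 * minimum delta avoids programming a decrementer value that could
	 * expire before set_dec() takes effect.
	 */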
 865
 866	register_decrementer_clockevent(cpu);
 867}
 868
 869void secondary_cpu_time_init(void)
 870{
 871	/* Start the decrementer on CPUs that have manual control
 872	 * such as BookE
 873	 */
 874	start_cpu_decrementer();
 875
 876	/* FIXME: Should make an unrelated change to move the snapshot_timebase
 877	 * call here! */
 878	register_decrementer_clockevent(smp_processor_id());
 879}
 880
 881/* This function is only called on the boot processor */
 882void __init time_init(void)
 883{
 884	struct div_result res;
 885	u64 scale;
 886	unsigned shift;
 887
 888	if (__USE_RTC()) {
 889		/* 601 processor: dec counts down by 128 every 128ns */
 890		ppc_tb_freq = 1000000000;
 891	} else {
 892		/* Normal PowerPC with timebase register */
 893		ppc_md.calibrate_decr();
 894		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
 895		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
 896		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
 897		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
 898	}
 899
 900	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
 901	tb_ticks_per_sec = ppc_tb_freq;
 902	tb_ticks_per_usec = ppc_tb_freq / 1000000;
 903	calc_cputime_factors();
 904	setup_cputime_one_jiffy();
 905
 906	/*
 907	 * Compute scale factor for sched_clock.
 908	 * The calibrate_decr() function has set tb_ticks_per_sec,
 909	 * which is the timebase frequency.
 910	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
 911	 * the 128-bit result as a 64.64 fixed-point number.
 912	 * We then shift that number right until it is less than 1.0,
 913	 * giving us the scale factor and shift count to use in
 914	 * sched_clock().
 915	 */
 916	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
 917	scale = res.result_low;
 918	for (shift = 0; res.result_high != 0; ++shift) {
 919		scale = (scale >> 1) | (res.result_high << 63);
 920		res.result_high >>= 1;
 921	}
 922	tb_to_ns_scale = scale;
 923	tb_to_ns_shift = shift;
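	/*
	 * sched_clock() then performs, in effect,
	 *	ns = mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
	 * taking the top 64 bits of the 64x64 product and shifting them
	 * back up by the count computed above.
	 */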
 924	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
 925	boot_tb = get_tb_or_rtc();
 926
 927	/* If platform provided a timezone (pmac), we correct the time */
 928	if (timezone_offset) {
 929		sys_tz.tz_minuteswest = -timezone_offset / 60;
 930		sys_tz.tz_dsttime = 0;
 931	}
 932
 933	vdso_data->tb_update_count = 0;
 934	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
 935
 936	/* Start the decrementer on CPUs that have manual control
 937	 * such as BookE
 938	 */
 939	start_cpu_decrementer();
 940
 941	/* Register the clocksource */
 942	clocksource_init();
 943
 944	init_decrementer_clockevent();
 945	tick_setup_hrtimer_broadcast();
 946}
 947
 948
 949#define FEBRUARY	2
 950#define	STARTOFTIME	1970
 951#define SECDAY		86400L
 952#define SECYR		(SECDAY * 365)
 953#define	leapyear(year)		((year) % 4 == 0 && \
 954				 ((year) % 100 != 0 || (year) % 400 == 0))
 955#define	days_in_year(a) 	(leapyear(a) ? 366 : 365)
 956#define	days_in_month(a) 	(month_days[(a) - 1])
 957
 958static int month_days[12] = {
 959	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
 960};
 961
 962/*
 963 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 964 */
 965void GregorianDay(struct rtc_time * tm)
 966{
 967	int leapsToDate;
 968	int lastYear;
 969	int day;
 970	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
 971
 972	lastYear = tm->tm_year - 1;
 973
 974	/*
 975	 * Number of leap corrections to apply up to end of last year
 976	 */
 977	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
 978
 979	/*
 980	 * This year is a leap year if it is divisible by 4 except when it is
 981	 * divisible by 100 unless it is divisible by 400
 982	 *
 983	 * e.g. 1904 was a leap year, 1900 was not, and 1996 and 2000 were
 984	 */
 985	day = tm->tm_mon > 2 && leapyear(tm->tm_year);
 986
 987	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
 988		   tm->tm_mday;
 989
 990	tm->tm_wday = day % 7;
 991}
 992
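/*
 * Worked example: for 1 Jan 2000 (tm_year 2000, tm_mon 1, tm_mday 1),
 * lastYear = 1999, leapsToDate = 499 - 19 + 4 = 484 and
 * day = 1999 * 365 + 484 + 0 + 1 = 730120, so tm_wday = 730120 % 7 = 6,
 * i.e. Saturday (0 = Sunday), matching the calendar.
 */
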
 993void to_tm(int tim, struct rtc_time * tm)
 994{
 995	register int    i;
 996	register long   hms, day;
 997
 998	day = tim / SECDAY;
 999	hms = tim % SECDAY;
1000
1001	/* Hours, minutes, seconds are easy */
1002	tm->tm_hour = hms / 3600;
1003	tm->tm_min = (hms % 3600) / 60;
1004	tm->tm_sec = (hms % 3600) % 60;
1005
1006	/* Number of years in days */
1007	for (i = STARTOFTIME; day >= days_in_year(i); i++)
1008		day -= days_in_year(i);
1009	tm->tm_year = i;
1010
1011	/* Number of months in days left */
1012	if (leapyear(tm->tm_year))
1013		days_in_month(FEBRUARY) = 29;
1014	for (i = 1; day >= days_in_month(i); i++)
1015		day -= days_in_month(i);
1016	days_in_month(FEBRUARY) = 28;
1017	tm->tm_mon = i;
1018
1019	/* Days are what is left over (+1) from all that. */
1020	tm->tm_mday = day + 1;
1021
1022	/*
1023	 * Determine the day of week
1024	 */
1025	GregorianDay(tm);
1026}
1027
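/*
 * Example: to_tm(0, &tm) decodes the epoch as 1970-01-01 00:00:00 with
 * tm_wday = 4 (Thursday).  Note that, unlike userspace struct tm,
 * tm_year here is the full year and tm_mon is 1-based.
 */
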
1028/*
1029 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
1030 * result.
1031 */
1032void div128_by_32(u64 dividend_high, u64 dividend_low,
1033		  unsigned divisor, struct div_result *dr)
1034{
1035	unsigned long a, b, c, d;
1036	unsigned long w, x, y, z;
1037	u64 ra, rb, rc;
1038
1039	a = dividend_high >> 32;
1040	b = dividend_high & 0xffffffff;
1041	c = dividend_low >> 32;
1042	d = dividend_low & 0xffffffff;
1043
1044	w = a / divisor;
1045	ra = ((u64)(a - (w * divisor)) << 32) + b;
1046
1047	rb = ((u64) do_div(ra, divisor) << 32) + c;
1048	x = ra;
1049
1050	rc = ((u64) do_div(rb, divisor) << 32) + d;
1051	y = rb;
1052
1053	do_div(rc, divisor);
1054	z = rc;
1055
1056	dr->result_high = ((u64)w << 32) + x;
1057	dr->result_low  = ((u64)y << 32) + z;
1058
1059}
1060
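/*
 * Example: div128_by_32(1000000000, 0, tb_ticks_per_sec, &dr), as used
 * in time_init(), computes (10^9 * 2^64) / tb_ticks_per_sec, i.e. the
 * nanoseconds-per-tick ratio as a 64.64 fixed-point value split across
 * dr->result_high and dr->result_low.
 */
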
1061/* We don't need to calibrate delay, we use the CPU timebase for that */
1062void calibrate_delay(void)
1063{
1064	/* Some generic code (such as spinlock debug) use loops_per_jiffy
1065	 * as the number of __delay(1) in a jiffy, so make it so
1066	 */
1067	loops_per_jiffy = tb_ticks_per_jiffy;
1068}
1069
1070static int __init rtc_init(void)
1071{
1072	struct platform_device *pdev;
1073
1074	if (!ppc_md.get_rtc_time)
1075		return -ENODEV;
1076
1077	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
1078
1079	return PTR_ERR_OR_ZERO(pdev);
1080}
1081
1082module_init(rtc_init);