// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 * "A Kernel Model for Precision Timekeeping" by Dave Mills
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/of_clk.h>
#include <linux/suspend.h>
#include <linux/sched/cputime.h>
#include <linux/sched/clock.h>
#include <linux/processor.h>
#include <asm/trace.h>

#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/asm-prototypes.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>

static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
        .name = "timebase",
        .rating = 400,
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask = CLOCKSOURCE_MASK(64),
        .read = timebase_read,
        .vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
};

#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

struct clock_event_device decrementer_clockevent = {
        .name = "decrementer",
        .rating = 200,
        .irq = 0,
        .set_next_event = decrementer_set_next_event,
        .set_state_oneshot_stopped = decrementer_shutdown,
        .set_state_shutdown = decrementer_shutdown,
        .tick_resume = decrementer_shutdown,
        .features = CLOCK_EVT_FEAT_ONESHOT |
                    CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)   (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)   mulhwu((xsec) << 12, max)
#endif

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

bool tb_invalid;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factor for converting from cputime_t (timebase ticks) to
 * microseconds. This is stored as 0.64 fixed-point binary fraction.
 */
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);

#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
#endif

static void calc_cputime_factors(void)
{
        struct div_result res;

        div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
        __cputime_usec_factor = res.result_low;
}
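
/*
 * Illustrative only (assumes a hypothetical 512 MHz timebase, not a value
 * taken from this file): with tb_ticks_per_sec = 512000000,
 * __cputime_usec_factor becomes 2^64 * 1000000 / 512000000 = 2^55, so a
 * ticks-to-microseconds conversion is a single high multiply, e.g.
 * mulhdu(ticks, __cputime_usec_factor), and 512 ticks map to 1 us.
 */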

/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static inline unsigned long read_spurr(unsigned long tb)
{
        if (cpu_has_feature(CPU_FTR_SPURR))
                return mfspr(SPRN_SPURR);
        if (cpu_has_feature(CPU_FTR_PURR))
                return mfspr(SPRN_PURR);
        return tb;
}

#ifdef CONFIG_PPC_SPLPAR

#include <asm/dtl.h>

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
        u64 i = local_paca->dtl_ridx;
        struct dtl_entry *dtl = local_paca->dtl_curr;
        struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
        struct lppaca *vpa = local_paca->lppaca_ptr;
        u64 tb_delta;
        u64 stolen = 0;
        u64 dtb;

        if (!dtl)
                return 0;

        if (i == be64_to_cpu(vpa->dtl_idx))
                return 0;
        while (i < be64_to_cpu(vpa->dtl_idx)) {
                dtb = be64_to_cpu(dtl->timebase);
                tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
                        be32_to_cpu(dtl->ready_to_enqueue_time);
                barrier();
                if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
                        /* buffer has overflowed */
                        i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
                        dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
                        continue;
                }
                if (dtb > stop_tb)
                        break;
                if (dtl_consumer)
                        dtl_consumer(dtl, i);
                stolen += tb_delta;
                ++i;
                ++dtl;
                if (dtl == dtl_end)
                        dtl = local_paca->dispatch_log;
        }
        local_paca->dtl_ridx = i;
        local_paca->dtl_curr = dtl;
        return stolen;
}

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void notrace accumulate_stolen_time(void)
{
        u64 sst, ust;
        struct cpu_accounting_data *acct = &local_paca->accounting;

        sst = scan_dispatch_log(acct->starttime_user);
        ust = scan_dispatch_log(acct->starttime);
        acct->stime -= sst;
        acct->utime -= ust;
        acct->steal_time += ust + sst;
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return 0;

        if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
                return scan_dispatch_log(stop_tb);

        return 0;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
        return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
                                        unsigned long now, unsigned long stime)
{
        unsigned long stime_scaled = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
        unsigned long nowscaled, deltascaled;
        unsigned long utime, utime_scaled;

        nowscaled = read_spurr(now);
        deltascaled = nowscaled - acct->startspurr;
        acct->startspurr = nowscaled;
        utime = acct->utime - acct->utime_sspurr;
        acct->utime_sspurr = acct->utime;

        /*
         * Because we don't read the SPURR on every kernel entry/exit,
         * deltascaled includes both user and system SPURR ticks.
         * Apportion these ticks to system SPURR ticks and user
         * SPURR ticks in the same ratio as the system time (delta)
         * and user time (udelta) values obtained from the timebase
         * over the same interval. The system ticks get accounted here;
         * the user ticks get saved up in paca->user_time_scaled to be
         * used by account_process_tick.
         */
        stime_scaled = stime;
        utime_scaled = utime;
        if (deltascaled != stime + utime) {
                if (utime) {
                        stime_scaled = deltascaled * stime / (stime + utime);
                        utime_scaled = deltascaled - stime_scaled;
                } else {
                        stime_scaled = deltascaled;
                }
        }
        acct->utime_scaled += utime_scaled;
#endif

        return stime_scaled;
}
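
/*
 * Illustrative only (the numbers are hypothetical, not from the original
 * source): if the timebase says stime = 100 and utime = 200 ticks since the
 * last SPURR read, but deltascaled = 600 SPURR ticks, the apportionment
 * above yields stime_scaled = 600 * 100 / 300 = 200 and utime_scaled = 400,
 * i.e. the scaled ticks keep the same 1:2 system/user ratio as the timebase.
 */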

static unsigned long vtime_delta(struct cpu_accounting_data *acct,
                                 unsigned long *stime_scaled,
                                 unsigned long *steal_time)
{
        unsigned long now, stime;

        WARN_ON_ONCE(!irqs_disabled());

        now = mftb();
        stime = now - acct->starttime;
        acct->starttime = now;

        *stime_scaled = vtime_delta_scaled(acct, now, stime);

        *steal_time = calculate_stolen_time(now);

        return stime;
}

static void vtime_delta_kernel(struct cpu_accounting_data *acct,
                               unsigned long *stime, unsigned long *stime_scaled)
{
        unsigned long steal_time;

        *stime = vtime_delta(acct, stime_scaled, &steal_time);
        *stime -= min(*stime, steal_time);
        acct->steal_time += steal_time;
}

void vtime_account_kernel(struct task_struct *tsk)
{
        struct cpu_accounting_data *acct = get_accounting(tsk);
        unsigned long stime, stime_scaled;

        vtime_delta_kernel(acct, &stime, &stime_scaled);

        if (tsk->flags & PF_VCPU) {
                acct->gtime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
                acct->utime_scaled += stime_scaled;
#endif
        } else {
                acct->stime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
                acct->stime_scaled += stime_scaled;
#endif
        }
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);

void vtime_account_idle(struct task_struct *tsk)
{
        unsigned long stime, stime_scaled, steal_time;
        struct cpu_accounting_data *acct = get_accounting(tsk);

        stime = vtime_delta(acct, &stime_scaled, &steal_time);
        acct->idle_time += stime + steal_time;
}

static void vtime_account_irq_field(struct cpu_accounting_data *acct,
                                    unsigned long *field)
{
        unsigned long stime, stime_scaled;

        vtime_delta_kernel(acct, &stime, &stime_scaled);
        *field += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
        acct->stime_scaled += stime_scaled;
#endif
}

void vtime_account_softirq(struct task_struct *tsk)
{
        struct cpu_accounting_data *acct = get_accounting(tsk);
        vtime_account_irq_field(acct, &acct->softirq_time);
}

void vtime_account_hardirq(struct task_struct *tsk)
{
        struct cpu_accounting_data *acct = get_accounting(tsk);
        vtime_account_irq_field(acct, &acct->hardirq_time);
}

static void vtime_flush_scaled(struct task_struct *tsk,
                               struct cpu_accounting_data *acct)
{
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
        if (acct->utime_scaled)
                tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
        if (acct->stime_scaled)
                tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);

        acct->utime_scaled = 0;
        acct->utime_sspurr = 0;
        acct->stime_scaled = 0;
#endif
}

/*
 * Account the whole cputime accumulated in the paca
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_kernel/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_flush(struct task_struct *tsk)
{
        struct cpu_accounting_data *acct = get_accounting(tsk);

        if (acct->utime)
                account_user_time(tsk, cputime_to_nsecs(acct->utime));

        if (acct->gtime)
                account_guest_time(tsk, cputime_to_nsecs(acct->gtime));

        if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
                account_steal_time(cputime_to_nsecs(acct->steal_time));
                acct->steal_time = 0;
        }

        if (acct->idle_time)
                account_idle_time(cputime_to_nsecs(acct->idle_time));

        if (acct->stime)
                account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
                                          CPUTIME_SYSTEM);

        if (acct->hardirq_time)
                account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
                                          CPUTIME_IRQ);
        if (acct->softirq_time)
                account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
                                          CPUTIME_SOFTIRQ);

        vtime_flush_scaled(tsk, acct);

        acct->utime = 0;
        acct->gtime = 0;
        acct->idle_time = 0;
        acct->stime = 0;
        acct->hardirq_time = 0;
        acct->softirq_time = 0;
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
        unsigned long start;

        spin_begin();
        if (tb_invalid) {
                /*
                 * TB is in error state and isn't ticking anymore.
                 * HMI handler was unable to recover from TB error.
                 * Return immediately, so that kernel won't get stuck here.
                 */
                spin_cpu_relax();
        } else {
                start = mftb();
                while (mftb() - start < loops)
                        spin_cpu_relax();
        }
        spin_end();
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
        __delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
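
/*
 * Illustrative only (assumes a hypothetical 512 MHz timebase): with
 * ppc_tb_freq = 512000000, tb_ticks_per_usec is 512, so udelay(10) simply
 * spins in __delay() for 5120 timebase ticks, roughly 10 microseconds.
 */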

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;

        return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline void set_irq_work_pending_flag(void)
{
        asm volatile("stb %0,%1(13)" : :
                "r" (1),
                "i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
        asm volatile("stb %0,%1(13)" : :
                "r" (0),
                "i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()     __this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()         __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()        __this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
        /*
         * 64-bit code that uses irq soft-mask can just cause an immediate
         * interrupt here that gets soft masked, if this is called under
         * local_irq_disable(). It might be possible to prevent that happening
         * by noticing interrupts are disabled and setting decrementer pending
         * to be replayed when irqs are enabled. The problem there is that
         * tracing can call irq_work_raise, including in code that does low
         * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
         * which could get tangled up if we're messing with the same state
         * here.
         */
        preempt_disable();
        set_irq_work_pending_flag();
        set_dec(1);
        preempt_enable();
}

#else /* CONFIG_IRQ_WORK */

#define test_irq_work_pending() 0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
{
        struct clock_event_device *evt = this_cpu_ptr(&decrementers);
        u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
        struct pt_regs *old_regs;
        u64 now;

        /*
         * Some implementations of hotplug will get timer interrupts while
         * offline, just ignore these.
         */
        if (unlikely(!cpu_online(smp_processor_id()))) {
                set_dec(decrementer_max);
                return;
        }

        /* Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions. When the
         * PPC_WATCHDOG (decrementer based) is configured, keep this at most
         * 31 bits, which is about 4 seconds on most systems, which gives
         * the watchdog a chance of catching timer interrupt hard lockups.
         */
        if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
                set_dec(0x7fffffff);
        else
                set_dec(decrementer_max);

        /* Conditionally hard-enable interrupts now that the DEC has been
         * bumped to its maximum value
         */
        may_hard_irq_enable();


#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                __do_IRQ(regs);
#endif

        old_regs = set_irq_regs(regs);

        trace_timer_interrupt_entry(regs);

        if (test_irq_work_pending()) {
                clear_irq_work_pending();
                irq_work_run();
        }

        now = get_tb();
        if (now >= *next_tb) {
                *next_tb = ~(u64)0;
                if (evt->event_handler)
                        evt->event_handler(evt);
                __this_cpu_inc(irq_stat.timer_irqs_event);
        } else {
                now = *next_tb - now;
                if (now <= decrementer_max)
                        set_dec(now);
                /* We may have raced with new irq work */
                if (test_irq_work_pending())
                        set_dec(1);
                __this_cpu_inc(irq_stat.timer_irqs_others);
        }

        trace_timer_interrupt_exit(regs);

        set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void timer_broadcast_interrupt(void)
{
        u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

        *next_tb = ~(u64)0;
        tick_receive_broadcast();
        __this_cpu_inc(irq_stat.broadcast_irqs_event);
}
#endif

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
        /* Disable the decrementer, so that it doesn't interfere
         * with suspending.
         */

        set_dec(decrementer_max);
        local_irq_disable();
        set_dec(decrementer_max);
}

static void generic_suspend_enable_irqs(void)
{
        local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
        if (ppc_md.suspend_disable_irqs)
                ppc_md.suspend_disable_irqs();
        generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
        generic_suspend_enable_irqs();
        if (ppc_md.suspend_enable_irqs)
                ppc_md.suspend_enable_irqs();
}
#endif

unsigned long long tb_to_ns(unsigned long long ticks)
{
        return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
notrace unsigned long long sched_clock(void)
{
        return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}


#ifdef CONFIG_PPC_PSERIES

/*
 * Running clock - attempts to give a view of time passing for a virtualised
 * kernel.
 * Uses the VTB register if available otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
        /*
         * Don't read the VTB as a host since KVM does not switch in host
         * timebase into the VTB when it takes a guest off the CPU, reading the
         * VTB would result in reading 'last switched out' guest VTB.
         *
         * Host kernels are often compiled with CONFIG_PPC_PSERIES checked, so
         * it would be unsafe to rely only on the #ifdef above.
         */
        if (firmware_has_feature(FW_FEATURE_LPAR) &&
            cpu_has_feature(CPU_FTR_ARCH_207S))
                return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

        /*
         * This is the next best approximation without a VTB.
         * On a host which is running bare metal there should never be any stolen
         * time and on a host which doesn't do any virtualisation TB *should* equal
         * VTB so it makes no difference anyway.
         */
        return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
#endif

static int __init get_freq(char *name, int cells, unsigned long *val)
{
        struct device_node *cpu;
        const __be32 *fp;
        int found = 0;

        /* The cpu node should have timebase and clock frequency properties */
        cpu = of_find_node_by_type(NULL, "cpu");

        if (cpu) {
                fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
                }

                of_node_put(cpu);
        }

        return found;
}

static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
        unsigned int tcr;

        /* Clear any pending timer interrupts */
        mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

        tcr = mfspr(SPRN_TCR);
        /*
         * The watchdog may have already been enabled by u-boot. So leave
         * TCR[WP] (Watchdog Period) alone.
         */
        tcr &= TCR_WP_MASK;     /* Clear all bits except for TCR[WP] */
        tcr |= TCR_DIE;         /* Enable decrementer */
        mtspr(SPRN_TCR, tcr);
#endif
}

void __init generic_calibrate_decr(void)
{
        ppc_tb_freq = DEFAULT_TB_FREQ;          /* hardcoded default */

        if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
            !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

                printk(KERN_ERR "WARNING: Estimating decrementer frequency "
                                "(not found)\n");
        }

        ppc_proc_freq = DEFAULT_PROC_FREQ;      /* hardcoded default */

        if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
            !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

                printk(KERN_ERR "WARNING: Estimating processor frequency "
                                "(not found)\n");
        }
}

int update_persistent_clock64(struct timespec64 now)
{
        struct rtc_time tm;

        if (!ppc_md.set_rtc_time)
                return -ENODEV;

        rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);

        return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec64 *ts)
{
        struct rtc_time tm;
        static int first = 1;

        ts->tv_nsec = 0;
        /* XXX this is a little fragile but will work okay in the short term */
        if (first) {
                first = 0;
                if (ppc_md.time_init)
                        timezone_offset = ppc_md.time_init();

                /* get_boot_time() isn't guaranteed to be safe to call late */
                if (ppc_md.get_boot_time) {
                        ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
                        return;
                }
        }
        if (!ppc_md.get_rtc_time) {
                ts->tv_sec = 0;
                return;
        }
        ppc_md.get_rtc_time(&tm);

        ts->tv_sec = rtc_tm_to_time64(&tm);
}

void read_persistent_clock64(struct timespec64 *ts)
{
        __read_persistent_clock(ts);

        /* Sanitize it in case real time clock is set below EPOCH */
        if (ts->tv_sec < 0) {
                ts->tv_sec = 0;
                ts->tv_nsec = 0;
        }

}

/* clocksource code */
static notrace u64 timebase_read(struct clocksource *cs)
{
        return (u64)get_tb();
}

static void __init clocksource_init(void)
{
        struct clocksource *clock = &clocksource_timebase;

        if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
                printk(KERN_ERR "clocksource: %s is already registered\n",
                       clock->name);
                return;
        }

        printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
               clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
{
        __this_cpu_write(decrementers_next_tb, get_tb() + evt);
        set_dec(evt);

        /* We may have raced with new irq work */
        if (test_irq_work_pending())
                set_dec(1);

        return 0;
}

static int decrementer_shutdown(struct clock_event_device *dev)
{
        decrementer_set_next_event(decrementer_max, dev);
        return 0;
}

static void register_decrementer_clockevent(int cpu)
{
        struct clock_event_device *dec = &per_cpu(decrementers, cpu);

        *dec = decrementer_clockevent;
        dec->cpumask = cpumask_of(cpu);

        clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);

        printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
                    dec->name, dec->mult, dec->shift, cpu);

        /* Set values for KVM, see kvm_emulate_dec() */
        decrementer_clockevent.mult = dec->mult;
        decrementer_clockevent.shift = dec->shift;
}
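
/*
 * Illustrative only (assumes a hypothetical 512 MHz timebase and the default
 * 32-bit decrementer): the clockevents_config_and_register() call above is
 * handed min_delta = 2 ticks (a few nanoseconds) and max_delta =
 * decrementer_max = 0x7fffffff ticks (about 4.19 s), and the clockevents
 * core derives the mult/shift conversion factors from ppc_tb_freq.
 */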

static void enable_large_decrementer(void)
{
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return;

        if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
                return;

        /*
         * If we're running as the hypervisor we need to enable the LD manually
         * otherwise firmware should have done it for us.
         */
        if (cpu_has_feature(CPU_FTR_HVMODE))
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
}

static void __init set_decrementer_max(void)
{
        struct device_node *cpu;
        u32 bits = 32;

        /* Prior to ISAv3 the decrementer is always 32 bit */
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return;

        cpu = of_find_node_by_type(NULL, "cpu");

        if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
                if (bits > 64 || bits < 32) {
                        pr_warn("time_init: firmware supplied invalid ibm,dec-bits");
                        bits = 32;
                }

                /* calculate the signed maximum given this many bits */
                decrementer_max = (1ul << (bits - 1)) - 1;
        }

        of_node_put(cpu);

        pr_info("time_init: %u bit decrementer (max: %llx)\n",
                bits, decrementer_max);
}
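
/*
 * Illustrative only (hypothetical device-tree value): an ibm,dec-bits
 * property of 56 gives decrementer_max = (1ul << 55) - 1 =
 * 0x7fffffffffffff, while the pre-ISAv3 default remains the 32-bit
 * DECREMENTER_DEFAULT_MAX of 0x7FFFFFFF.
 */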

static void __init init_decrementer_clockevent(void)
{
        register_decrementer_clockevent(smp_processor_id());
}

void secondary_cpu_time_init(void)
{
        /* Enable and test the large decrementer for this cpu */
        enable_large_decrementer();

        /* Start the decrementer on CPUs that have manual control
         * such as BookE
         */
        start_cpu_decrementer();

        /* FIXME: Should make an unrelated change to move the
         * snapshot_timebase call here! */
        register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
        struct div_result res;
        u64 scale;
        unsigned shift;

        /* Normal PowerPC with timebase register */
        ppc_md.calibrate_decr();
        printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
               ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
        printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
               ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);

        tb_ticks_per_jiffy = ppc_tb_freq / HZ;
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        calc_cputime_factors();

        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
         * which is the timebase frequency.
         * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
         * the 128-bit result as a 64.64 fixed-point number.
         * We then shift that number right until it is less than 1.0,
         * giving us the scale factor and shift count to use in
         * sched_clock().
         */
        div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
        scale = res.result_low;
        for (shift = 0; res.result_high != 0; ++shift) {
                scale = (scale >> 1) | (res.result_high << 63);
                res.result_high >>= 1;
        }
        tb_to_ns_scale = scale;
        tb_to_ns_shift = shift;
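        /*
         * Illustrative only (assumes a hypothetical 512 MHz timebase):
         * 1e9 * 2^64 / 512000000 is about 1.953125 * 2^64, so result_high
         * is 1 and a single shift is needed, leaving tb_to_ns_shift = 1 and
         * tb_to_ns_scale ~= 0.9765625 * 2^64. sched_clock() then computes
         * mulhdu(ticks, scale) << 1, i.e. ~1.953125 ns per timebase tick.
         */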
        /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
        boot_tb = get_tb();

        /* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
                sys_tz.tz_minuteswest = -timezone_offset / 60;
                sys_tz.tz_dsttime = 0;
        }

        vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

        /* initialise and enable the large decrementer (if we have one) */
        set_decrementer_max();
        enable_large_decrementer();

        /* Start the decrementer on CPUs that have manual control
         * such as BookE
         */
        start_cpu_decrementer();

        /* Register the clocksource */
        clocksource_init();

        init_decrementer_clockevent();
        tick_setup_hrtimer_broadcast();

        of_clk_init(NULL);
        enable_sched_clock_irqtime();
}

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
                  unsigned divisor, struct div_result *dr)
{
        unsigned long a, b, c, d;
        unsigned long w, x, y, z;
        u64 ra, rb, rc;

        a = dividend_high >> 32;
        b = dividend_high & 0xffffffff;
        c = dividend_low >> 32;
        d = dividend_low & 0xffffffff;

        w = a / divisor;
        ra = ((u64)(a - (w * divisor)) << 32) + b;

        rb = ((u64) do_div(ra, divisor) << 32) + c;
        x = ra;

        rc = ((u64) do_div(rb, divisor) << 32) + d;
        y = rb;

        do_div(rc, divisor);
        z = rc;

        dr->result_high = ((u64)w << 32) + x;
        dr->result_low = ((u64)y << 32) + z;

}
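
/*
 * Illustrative only (the operand values are hypothetical): time_init()
 * effectively calls div128_by_32(1000000000, 0, 512000000, &res) for a
 * 512 MHz timebase, forming the 64.64 fixed-point ratio 1e9 / 512e6 =
 * 1.953125, so res.result_high = 1 and res.result_low =
 * 0xf400000000000000 (0.953125 * 2^64).
 */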

/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
        /* Some generic code (such as spinlock debug) uses loops_per_jiffy
         * as the number of __delay(1) in a jiffy, so make it so
         */
        loops_per_jiffy = tb_ticks_per_jiffy;
}

#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
        ppc_md.get_rtc_time(tm);
        return 0;
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
        if (!ppc_md.set_rtc_time)
                return -EOPNOTSUPP;

        if (ppc_md.set_rtc_time(tm) < 0)
                return -EOPNOTSUPP;

        return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
        .read_time = rtc_generic_get_time,
        .set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
        struct platform_device *pdev;

        if (!ppc_md.get_rtc_time)
                return -ENODEV;

        pdev = platform_device_register_data(NULL, "rtc-generic", -1,
                                             &rtc_generic_ops,
                                             sizeof(rtc_generic_ops));

        return PTR_ERR_OR_ZERO(pdev);
}

device_initcall(rtc_init);
#endif