// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/cputime.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/of_clk.h>
#include <linux/suspend.h>
#include <linux/processor.h>
#include <linux/mc146818rtc.h>
#include <linux/platform_device.h>

#include <asm/trace.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/mce.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>

static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name = "timebase",
	.rating = 400,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask = CLOCKSOURCE_MASK(64),
	.read = timebase_read,
	.vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
};

#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
EXPORT_SYMBOL_GPL(decrementer_max); /* for KVM HDEC */

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

struct clock_event_device decrementer_clockevent = {
	.name = "decrementer",
	.rating = 200,
	.irq = 0,
	.set_next_event = decrementer_set_next_event,
	.set_state_oneshot_stopped = decrementer_shutdown,
	.set_state_shutdown = decrementer_shutdown,
	.tick_resume = decrementer_shutdown,
	.features = CLOCK_EVT_FEAT_ONESHOT |
		    CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);

/*
 * This always puts next_tb beyond now, so the clock event will never fire
 * with the usual comparison; no separate test for stopped is needed.
 */
#define DEC_CLOCKEVENT_STOPPED ~0ULL
DEFINE_PER_CPU(u64, decrementers_next_tb) = DEC_CLOCKEVENT_STOPPED;
EXPORT_SYMBOL_GPL(decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max)
#endif
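/*
 * Illustrative example (numbers not from the original source): with
 * xsec = 524288 (half a second in 2^-20 second units) and max = 1000,
 * both variants yield (524288 * 1000) / 1048576 = 500. The 32-bit form
 * computes the same quotient as the high word of ((xsec << 12) * max):
 * the 12-bit pre-shift plus the implicit >> 32 divides by 2^20 overall.
 */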
128
129unsigned long tb_ticks_per_jiffy;
130unsigned long tb_ticks_per_usec = 100; /* sane default */
131EXPORT_SYMBOL(tb_ticks_per_usec);
132unsigned long tb_ticks_per_sec;
133EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime conversions */
134
135DEFINE_SPINLOCK(rtc_lock);
136EXPORT_SYMBOL_GPL(rtc_lock);
137
138static u64 tb_to_ns_scale __read_mostly;
139static unsigned tb_to_ns_shift __read_mostly;
140static u64 boot_tb __read_mostly;
141
142extern struct timezone sys_tz;
143static long timezone_offset;
144
145unsigned long ppc_proc_freq;
146EXPORT_SYMBOL_GPL(ppc_proc_freq);
147unsigned long ppc_tb_freq;
148EXPORT_SYMBOL_GPL(ppc_tb_freq);
149
150bool tb_invalid;
151
152#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
153/*
154 * Read the SPURR on systems that have it, otherwise the PURR,
155 * or if that doesn't exist return the timebase value passed in.
156 */
157static inline unsigned long read_spurr(unsigned long tb)
158{
159 if (cpu_has_feature(CPU_FTR_SPURR))
160 return mfspr(SPRN_SPURR);
161 if (cpu_has_feature(CPU_FTR_PURR))
162 return mfspr(SPRN_PURR);
163 return tb;
164}
165
166/*
167 * Account time for a transition between system, hard irq
168 * or soft irq state.
169 */
170static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
171 unsigned long now, unsigned long stime)
172{
173 unsigned long stime_scaled = 0;
174#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
175 unsigned long nowscaled, deltascaled;
176 unsigned long utime, utime_scaled;
177
178 nowscaled = read_spurr(now);
179 deltascaled = nowscaled - acct->startspurr;
180 acct->startspurr = nowscaled;
181 utime = acct->utime - acct->utime_sspurr;
182 acct->utime_sspurr = acct->utime;
183
184 /*
185 * Because we don't read the SPURR on every kernel entry/exit,
186 * deltascaled includes both user and system SPURR ticks.
187 * Apportion these ticks to system SPURR ticks and user
188 * SPURR ticks in the same ratio as the system time (delta)
189 * and user time (udelta) values obtained from the timebase
190 * over the same interval. The system ticks get accounted here;
191 * the user ticks get saved up in paca->user_time_scaled to be
192 * used by account_process_tick.
193 */
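	/*
	 * Worked example (illustrative numbers): if the timebase measured
	 * stime = 100 and utime = 200 over the interval while deltascaled
	 * came to 600 SPURR ticks, the code below apportions stime_scaled =
	 * 600 * 100 / 300 = 200 and utime_scaled = 600 - 200 = 400,
	 * preserving the 1:2 system:user ratio seen on the timebase.
	 */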
	stime_scaled = stime;
	utime_scaled = utime;
	if (deltascaled != stime + utime) {
		if (utime) {
			stime_scaled = deltascaled * stime / (stime + utime);
			utime_scaled = deltascaled - stime_scaled;
		} else {
			stime_scaled = deltascaled;
		}
	}
	acct->utime_scaled += utime_scaled;
#endif

	return stime_scaled;
}

static unsigned long vtime_delta(struct cpu_accounting_data *acct,
				 unsigned long *stime_scaled,
				 unsigned long *steal_time)
{
	unsigned long now, stime;

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	stime = now - acct->starttime;
	acct->starttime = now;

	*stime_scaled = vtime_delta_scaled(acct, now, stime);

	if (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
	    firmware_has_feature(FW_FEATURE_SPLPAR))
		*steal_time = pseries_calculate_stolen_time(now);
	else
		*steal_time = 0;

	return stime;
}

static void vtime_delta_kernel(struct cpu_accounting_data *acct,
			       unsigned long *stime, unsigned long *stime_scaled)
{
	unsigned long steal_time;

	*stime = vtime_delta(acct, stime_scaled, &steal_time);
	*stime -= min(*stime, steal_time);
	acct->steal_time += steal_time;
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);
	unsigned long stime, stime_scaled;

	vtime_delta_kernel(acct, &stime, &stime_scaled);

	if (tsk->flags & PF_VCPU) {
		acct->gtime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->utime_scaled += stime_scaled;
#endif
	} else {
		acct->stime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
		acct->stime_scaled += stime_scaled;
#endif
	}
}
EXPORT_SYMBOL_GPL(vtime_account_kernel);

void vtime_account_idle(struct task_struct *tsk)
{
	unsigned long stime, stime_scaled, steal_time;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	stime = vtime_delta(acct, &stime_scaled, &steal_time);
	acct->idle_time += stime + steal_time;
}

static void vtime_account_irq_field(struct cpu_accounting_data *acct,
				    unsigned long *field)
{
	unsigned long stime, stime_scaled;

	vtime_delta_kernel(acct, &stime, &stime_scaled);
	*field += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	acct->stime_scaled += stime_scaled;
#endif
}

void vtime_account_softirq(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);
	vtime_account_irq_field(acct, &acct->softirq_time);
}

void vtime_account_hardirq(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);
	vtime_account_irq_field(acct, &acct->hardirq_time);
}

static void vtime_flush_scaled(struct task_struct *tsk,
			       struct cpu_accounting_data *acct)
{
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	if (acct->utime_scaled)
		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
	if (acct->stime_scaled)
		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);

	acct->utime_scaled = 0;
	acct->utime_sspurr = 0;
	acct->stime_scaled = 0;
#endif
}

/*
 * Account the whole cputime accumulated in the paca
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_kernel/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_flush(struct task_struct *tsk)
{
	struct cpu_accounting_data *acct = get_accounting(tsk);

	if (acct->utime)
		account_user_time(tsk, cputime_to_nsecs(acct->utime));

	if (acct->gtime)
		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));

	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
		account_steal_time(cputime_to_nsecs(acct->steal_time));
		acct->steal_time = 0;
	}

	if (acct->idle_time)
		account_idle_time(cputime_to_nsecs(acct->idle_time));

	if (acct->stime)
		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
					  CPUTIME_SYSTEM);

	if (acct->hardirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
					  CPUTIME_IRQ);
	if (acct->softirq_time)
		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
					  CPUTIME_SOFTIRQ);

	vtime_flush_scaled(tsk, acct);

	acct->utime = 0;
	acct->gtime = 0;
	acct->idle_time = 0;
	acct->stime = 0;
	acct->hardirq_time = 0;
	acct->softirq_time = 0;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

void __no_kcsan __delay(unsigned long loops)
{
	unsigned long start;

	spin_begin();
	if (tb_invalid) {
		/*
		 * TB is in error state and isn't ticking anymore.
		 * HMI handler was unable to recover from TB error.
		 * Return immediately, so that kernel won't get stuck here.
		 */
		spin_cpu_relax();
	} else {
		start = mftb();
		while (mftb() - start < loops)
			spin_cpu_relax();
	}
	spin_end();
}
EXPORT_SYMBOL(__delay);

void __no_kcsan udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		     : "=r" (x)
		     : "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		     "r" (1),
		     "i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		     "r" (0),
		     "i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */
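/*
 * Note: on 64-bit kernels r13 holds the PACA pointer, so the lbz/stb
 * forms above touch this cpu's flag in a single instruction, which is
 * presumably why inline asm is used instead of the generic this-cpu
 * accessors the 32-bit build falls back to.
 */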

void arch_irq_work_raise(void)
{
	/*
	 * 64-bit code that uses irq soft-mask can just cause an immediate
	 * interrupt here that gets soft masked, if this is called under
	 * local_irq_disable(). It might be possible to prevent that happening
	 * by noticing interrupts are disabled and setting decrementer pending
	 * to be replayed when irqs are enabled. The problem there is that
	 * tracing can call irq_work_raise, including in code that does low
	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
	 * which could get tangled up if we're messing with the same state
	 * here.
	 */
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}

static void set_dec_or_work(u64 val)
{
	set_dec(val);
	/* We may have raced with new irq work */
	if (unlikely(test_irq_work_pending()))
		set_dec(1);
}

#else /* CONFIG_IRQ_WORK */

#define test_irq_work_pending() 0
#define clear_irq_work_pending()

static void set_dec_or_work(u64 val)
{
	set_dec(val);
}
#endif /* CONFIG_IRQ_WORK */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void timer_rearm_host_dec(u64 now)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	WARN_ON_ONCE(!arch_irqs_disabled());
	WARN_ON_ONCE(mfmsr() & MSR_EE);

	if (now >= *next_tb) {
		local_paca->irq_happened |= PACA_IRQ_DEC;
	} else {
		now = *next_tb - now;
		if (now > decrementer_max)
			now = decrementer_max;
		set_dec_or_work(now);
	}
}
EXPORT_SYMBOL_GPL(timer_rearm_host_dec);
#endif

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
{
	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
	struct pt_regs *old_regs;
	u64 now;

	/*
	 * Some implementations of hotplug will get timer interrupts while
	 * offline; just ignore these.
	 */
	if (unlikely(!cpu_online(smp_processor_id()))) {
		set_dec(decrementer_max);
		return;
	}

	/* Conditionally hard-enable interrupts. */
	if (should_hard_irq_enable(regs)) {
		/*
		 * Ensure a positive value is written to the decrementer, or
		 * else some CPUs will continue to take decrementer exceptions.
		 * When the PPC_WATCHDOG (decrementer based) is configured,
		 * keep this at most 31 bits, which is about 4 seconds on most
		 * systems, which gives the watchdog a chance of catching timer
		 * interrupt hard lockups.
		 */
		if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
			set_dec(0x7fffffff);
		else
			set_dec(decrementer_max);

		do_hard_irq_enable();
	}

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		__do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);

	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		mce_run_irq_context_handlers();
		irq_work_run();
	}

	now = get_tb();
	if (now >= *next_tb) {
		evt->event_handler(evt);
		__this_cpu_inc(irq_stat.timer_irqs_event);
	} else {
		now = *next_tb - now;
		if (now > decrementer_max)
			now = decrementer_max;
		set_dec_or_work(now);
		__this_cpu_inc(irq_stat.timer_irqs_others);
	}

	trace_timer_interrupt_exit(regs);

	set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void timer_broadcast_interrupt(void)
{
	tick_receive_broadcast();
	__this_cpu_inc(irq_stat.broadcast_irqs_event);
}
#endif

#ifdef CONFIG_SUSPEND
/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();

	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(decrementer_max);
	local_irq_disable();
	set_dec(decrementer_max);
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	local_irq_enable();

	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif

unsigned long long tb_to_ns(unsigned long long ticks)
{
	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);
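/*
 * Example (illustrative numbers): tb_to_ns_scale and tb_to_ns_shift are
 * computed at boot so that mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift
 * approximates ticks * NSEC_PER_SEC / ppc_tb_freq. With a hypothetical
 * 512 MHz timebase each tick is ~1.953 ns, so tb_to_ns(512000000)
 * returns roughly 1000000000 (one second).
 */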
607
608/*
609 * Scheduler clock - returns current time in nanosec units.
610 *
611 * Note: mulhdu(a, b) (multiply high double unsigned) returns
612 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
613 * are 64-bit unsigned numbers.
614 */
615notrace unsigned long long sched_clock(void)
616{
617 return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
618}
619
620
621#ifdef CONFIG_PPC_PSERIES
622
/*
 * Running clock - attempts to give a view of time passing for virtualised
 * kernels.
 * Uses the VTB register if available, otherwise the next best guess.
 */
unsigned long long running_clock(void)
{
	/*
	 * Don't read the VTB as a host, since KVM does not switch the host
	 * timebase back into the VTB when it takes a guest off the CPU;
	 * reading the VTB would result in reading the 'last switched out'
	 * guest VTB.
	 *
	 * Host kernels are often compiled with CONFIG_PPC_PSERIES checked,
	 * so it would be unsafe to rely only on the #ifdef above.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

	/*
	 * This is the next best approximation without a VTB.
	 * On a host running bare metal there should never be any stolen
	 * time, and on a host which doesn't do any virtualisation TB
	 * *should* equal VTB, so it makes no difference anyway.
	 */
	return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
}
#endif

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}
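/*
 * Example (hypothetical device tree values): a cpu node containing
 *
 *	timebase-frequency = <512000000>;
 *
 * is read with cells = 1, while the 64-bit form
 *
 *	ibm,extended-timebase-frequency = <0x0 0x1e848000>;
 *
 * uses cells = 2; of_read_ulong() folds the cells into a single value
 * (0x1e848000 == 512000000).
 */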

static void start_cpu_decrementer(void)
{
#ifdef CONFIG_BOOKE_OR_40x
	unsigned int tcr;

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	tcr = mfspr(SPRN_TCR);
	/*
	 * The watchdog may have already been enabled by u-boot. So leave
	 * TCR[WP] (Watchdog Period) alone.
	 */
687 tcr &= TCR_WP_MASK; /* Clear all bits except for TCR[WP] */
688 tcr |= TCR_DIE; /* Enable decrementer */
689 mtspr(SPRN_TCR, tcr);
690#endif
691}
692
693void __init generic_calibrate_decr(void)
694{
695 ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
696
697 if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
698 !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
699
		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(timebase-frequency property not found)\n");
702 }
703
704 ppc_proc_freq = DEFAULT_PROC_FREQ; /* hardcoded default */
705
706 if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
707 !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
708
		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(clock-frequency property not found)\n");
711 }
712}
713
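/*
 * Called by the generic timekeeping code (roughly every 11 minutes while
 * the system clock is NTP-synchronised) to write the system time back into
 * the hardware RTC.
 */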
714int update_persistent_clock64(struct timespec64 now)
715{
716 struct rtc_time tm;
717
718 if (!ppc_md.set_rtc_time)
719 return -ENODEV;
720
721 rtc_time64_to_tm(now.tv_sec + 1 + timezone_offset, &tm);
722
723 return ppc_md.set_rtc_time(&tm);
724}
725
726static void __read_persistent_clock(struct timespec64 *ts)
727{
728 struct rtc_time tm;
729 static int first = 1;
730
731 ts->tv_nsec = 0;
732 /* XXX this is a little fragile but will work okay in the short term */
733 if (first) {
734 first = 0;
735 if (ppc_md.time_init)
736 timezone_offset = ppc_md.time_init();
737
738 /* get_boot_time() isn't guaranteed to be safe to call late */
739 if (ppc_md.get_boot_time) {
740 ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
741 return;
742 }
743 }
744 if (!ppc_md.get_rtc_time) {
745 ts->tv_sec = 0;
746 return;
747 }
748 ppc_md.get_rtc_time(&tm);
749
750 ts->tv_sec = rtc_tm_to_time64(&tm);
751}
752
753void read_persistent_clock64(struct timespec64 *ts)
754{
755 __read_persistent_clock(ts);
756
	/* Sanitize it in case the real-time clock is set before the epoch */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}
764
765/* clocksource code */
766static notrace u64 timebase_read(struct clocksource *cs)
767{
768 return (u64)get_tb();
769}
770
771static void __init clocksource_init(void)
772{
773 struct clocksource *clock = &clocksource_timebase;
774
775 if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: failed to register %s\n",
		       clock->name);
778 return;
779 }
780
781 printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
782 clock->name, clock->mult, clock->shift);
783}
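
/*
 * For illustration (values computed by hand, not by this code): the
 * clocksource core derives the mult/shift printed above such that
 * ns = (ticks * mult) >> shift. A 512 MHz timebase ticks every
 * 1.953125 ns, which e.g. mult = 0x01f40000 and shift = 24 would represent
 * exactly, since 0x01f40000 / 2^24 = 32768000 / 16777216 = 1.953125.
 */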
784
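/*
 * The clockevents core converts a nanosecond delta into timebase ticks and
 * passes it to decrementer_set_next_event() below as 'evt', already
 * clamped to the range given to clockevents_config_and_register() in
 * register_decrementer_clockevent(): a minimum of 2 ticks and a maximum of
 * decrementer_max.
 */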
785static int decrementer_set_next_event(unsigned long evt,
786 struct clock_event_device *dev)
787{
788 __this_cpu_write(decrementers_next_tb, get_tb() + evt);
789 set_dec_or_work(evt);
790
791 return 0;
792}
793
794static int decrementer_shutdown(struct clock_event_device *dev)
795{
796 __this_cpu_write(decrementers_next_tb, DEC_CLOCKEVENT_STOPPED);
797 set_dec_or_work(decrementer_max);
798
799 return 0;
800}
801
802static void register_decrementer_clockevent(int cpu)
803{
804 struct clock_event_device *dec = &per_cpu(decrementers, cpu);
805
806 *dec = decrementer_clockevent;
807 dec->cpumask = cpumask_of(cpu);
808
809 clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);
810
811 printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
812 dec->name, dec->mult, dec->shift, cpu);
813
814 /* Set values for KVM, see kvm_emulate_dec() */
815 decrementer_clockevent.mult = dec->mult;
816 decrementer_clockevent.shift = dec->shift;
817}
818
819static void enable_large_decrementer(void)
820{
821 if (!cpu_has_feature(CPU_FTR_ARCH_300))
822 return;
823
824 if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
825 return;
826
	/*
	 * If we're running as the hypervisor we need to enable the large
	 * decrementer manually; otherwise firmware should have done it
	 * for us.
	 */
831 if (cpu_has_feature(CPU_FTR_HVMODE))
832 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
833}
834
835static void __init set_decrementer_max(void)
836{
837 struct device_node *cpu;
838 u32 bits = 32;
839
	/* Prior to ISAv3 the decrementer is always 32 bits wide */
841 if (!cpu_has_feature(CPU_FTR_ARCH_300))
842 return;
843
844 cpu = of_find_node_by_type(NULL, "cpu");
845
846 if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
847 if (bits > 64 || bits < 32) {
			pr_warn("time_init: firmware supplied invalid ibm,dec-bits\n");
849 bits = 32;
850 }
851
852 /* calculate the signed maximum given this many bits */
853 decrementer_max = (1ul << (bits - 1)) - 1;
854 }
855
856 of_node_put(cpu);
857
858 pr_info("time_init: %u bit decrementer (max: %llx)\n",
859 bits, decrementer_max);
860}
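
/*
 * Example: for firmware reporting ibm,dec-bits = 56 (a plausible value for
 * an ISAv3 large decrementer, used here only for illustration), the code
 * above yields decrementer_max = (1ul << 55) - 1 = 0x7fffffffffffff.
 */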
861
862static void __init init_decrementer_clockevent(void)
863{
864 register_decrementer_clockevent(smp_processor_id());
865}
866
867void secondary_cpu_time_init(void)
868{
869 /* Enable and test the large decrementer for this cpu */
870 enable_large_decrementer();
871
	/*
	 * Start the decrementer on CPUs that have manual control,
	 * such as BookE.
	 */
875 start_cpu_decrementer();
876
	/* FIXME: Should make unrelated change to move snapshot_timebase
	 * call here! */
879 register_decrementer_clockevent(smp_processor_id());
880}
881
882/* This function is only called on the boot processor */
883void __init time_init(void)
884{
885 struct div_result res;
886 u64 scale;
887 unsigned shift;
888
889 /* Normal PowerPC with timebase register */
890 if (ppc_md.calibrate_decr)
891 ppc_md.calibrate_decr();
892 else
893 generic_calibrate_decr();
894
895 printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
896 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
897 printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
898 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
899
900 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
901 tb_ticks_per_sec = ppc_tb_freq;
902 tb_ticks_per_usec = ppc_tb_freq / 1000000;
903
904 /*
905 * Compute scale factor for sched_clock.
906 * The calibrate_decr() function has set tb_ticks_per_sec,
907 * which is the timebase frequency.
908 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
909 * the 128-bit result as a 64.64 fixed-point number.
910 * We then shift that number right until it is less than 1.0,
911 * giving us the scale factor and shift count to use in
912 * sched_clock().
913 */
914 div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
915 scale = res.result_low;
916 for (shift = 0; res.result_high != 0; ++shift) {
917 scale = (scale >> 1) | (res.result_high << 63);
918 res.result_high >>= 1;
919 }
920 tb_to_ns_scale = scale;
921 tb_to_ns_shift = shift;
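	/*
	 * Worked example (illustrative): with tb_ticks_per_sec = 512000000,
	 * 1e9 * 2^64 / 512e6 = 1.953125 * 2^64, so res.result_high == 1 and
	 * the loop above runs once, leaving shift = 1 and
	 * scale = 0.9765625 * 2^64. sched_clock() then returns
	 * mulhdu(tb, scale) << 1, i.e. tb * 1.953125, matching the
	 * 1.953125 ns tick period.
	 */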
922 /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
923 boot_tb = get_tb();
924
925 /* If platform provided a timezone (pmac), we correct the time */
926 if (timezone_offset) {
927 sys_tz.tz_minuteswest = -timezone_offset / 60;
928 sys_tz.tz_dsttime = 0;
929 }
930
931 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
932
933 /* initialise and enable the large decrementer (if we have one) */
934 set_decrementer_max();
935 enable_large_decrementer();
936
	/*
	 * Start the decrementer on CPUs that have manual control,
	 * such as BookE.
	 */
940 start_cpu_decrementer();
941
942 /* Register the clocksource */
943 clocksource_init();
944
945 init_decrementer_clockevent();
946 tick_setup_hrtimer_broadcast();
947
948 of_clk_init(NULL);
949 enable_sched_clock_irqtime();
950}
951
952/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
 * result; any final remainder is discarded.
955 */
956void div128_by_32(u64 dividend_high, u64 dividend_low,
957 unsigned divisor, struct div_result *dr)
958{
959 unsigned long a, b, c, d;
960 unsigned long w, x, y, z;
961 u64 ra, rb, rc;
962
963 a = dividend_high >> 32;
964 b = dividend_high & 0xffffffff;
965 c = dividend_low >> 32;
966 d = dividend_low & 0xffffffff;
967
968 w = a / divisor;
969 ra = ((u64)(a - (w * divisor)) << 32) + b;
970
971 rb = ((u64) do_div(ra, divisor) << 32) + c;
972 x = ra;
973
974 rc = ((u64) do_div(rb, divisor) << 32) + d;
975 y = rb;
976
977 do_div(rc, divisor);
978 z = rc;
979
980 dr->result_high = ((u64)w << 32) + x;
981 dr->result_low = ((u64)y << 32) + z;
}
984
/* We don't need to calibrate delay; we use the CPU timebase for that */
986void calibrate_delay(void)
987{
	/*
	 * Some generic code (such as spinlock debug) uses loops_per_jiffy
	 * as the number of __delay(1) calls in a jiffy, so make it so.
	 */
991 loops_per_jiffy = tb_ticks_per_jiffy;
992}
993
994#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
995static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
996{
997 ppc_md.get_rtc_time(tm);
998 return 0;
999}
1000
1001static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
1002{
1003 if (!ppc_md.set_rtc_time)
1004 return -EOPNOTSUPP;
1005
1006 if (ppc_md.set_rtc_time(tm) < 0)
1007 return -EOPNOTSUPP;
1008
1009 return 0;
1010}
1011
1012static const struct rtc_class_ops rtc_generic_ops = {
1013 .read_time = rtc_generic_get_time,
1014 .set_time = rtc_generic_set_time,
1015};
1016
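/*
 * rtc_init() below registers an "rtc-generic" platform device carrying
 * rtc_generic_ops as platform data; the matching driver
 * (drivers/rtc/rtc-generic.c) retrieves the ops via dev_get_platdata() and
 * exposes the machine's RTC through the standard RTC class interface.
 */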
1017static int __init rtc_init(void)
1018{
1019 struct platform_device *pdev;
1020
1021 if (!ppc_md.get_rtc_time)
1022 return -ENODEV;
1023
1024 pdev = platform_device_register_data(NULL, "rtc-generic", -1,
1025 &rtc_generic_ops,
1026 sizeof(rtc_generic_ops));
1027
1028 return PTR_ERR_OR_ZERO(pdev);
1029}
1030
1031device_initcall(rtc_init);
1032#endif