// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/timekeeper_internal.h>
#include <linux/platform_device.h>
#include <linux/sched/cputime.h>

#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>

#include "fsyscall_gtod_data.h"

static u64 itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data;

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

static struct clocksource clocksource_itc = {
	.name		= "itc",
	.rating		= 350,
	.read		= itc_get_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *itc_clocksource;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

#include <linux/kernel_stat.h>

extern u64 cycle_to_nsec(u64 cyc);

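/*
 * Flush the raw cycle counts accumulated in thread_info into the generic
 * cputime accounting, converting ITC cycles to nanoseconds first.
 */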
void vtime_flush(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 delta;

	if (ti->utime)
		account_user_time(tsk, cycle_to_nsec(ti->utime));

	if (ti->gtime)
		account_guest_time(tsk, cycle_to_nsec(ti->gtime));

	if (ti->idle_time)
		account_idle_time(cycle_to_nsec(ti->idle_time));

	if (ti->stime) {
		delta = cycle_to_nsec(ti->stime);
		account_system_index_time(tsk, delta, CPUTIME_SYSTEM);
	}

	if (ti->hardirq_time) {
		delta = cycle_to_nsec(ti->hardirq_time);
		account_system_index_time(tsk, delta, CPUTIME_IRQ);
	}

	if (ti->softirq_time) {
		delta = cycle_to_nsec(ti->softirq_time);
		account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
	}

	ti->utime = 0;
	ti->gtime = 0;
	ti->idle_time = 0;
	ti->stime = 0;
	ti->hardirq_time = 0;
	ti->softirq_time = 0;
}

/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void arch_vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(current);

	ni->ac_stamp = pi->ac_stamp;
	ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq
 * state.  Note that this function is called with interrupts disabled,
 * as asserted by the WARN_ON_ONCE below.
 */
static __u64 vtime_delta(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 now, delta_stime;

	WARN_ON_ONCE(!irqs_disabled());

	now = ia64_get_itc();
	delta_stime = now - ti->ac_stamp;
	ti->ac_stamp = now;

	return delta_stime;
}

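/*
 * Charge the cycles since the last accounting stamp to guest, hard irq,
 * soft irq or system time, depending on the context we are in.
 */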
void vtime_account_system(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 stime = vtime_delta(tsk);

	if ((tsk->flags & PF_VCPU) && !irq_count())
		ti->gtime += stime;
	else if (hardirq_count())
		ti->hardirq_time += stime;
	else if (in_serving_softirq())
		ti->softirq_time += stime;
	else
		ti->stime += stime;
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);

	ti->idle_time += vtime_delta(tsk);
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (cpu_is_offline(smp_processor_id())) {
		return IRQ_HANDLED;
	}

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

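	/*
	 * Account at least one tick; if the interrupt was delayed, keep
	 * ticking until the next interrupt time lies in the future again.
	 */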
	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id)
			xtime_update(1);

		local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing the monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
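	/*
	 * The offsets form a bit-reversed fraction of delta: cpu 1 fires
	 * delta/2 after cpu 0, cpu 2 at delta/4, cpu 3 at 3*delta/4,
	 * cpu 4 at delta/8, and so on.
	 */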
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}

static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk("Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);

void ia64_init_itm(void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

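	/* number of ITC cycles per timer tick, rounded to nearest */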
	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
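	/*
	 * nsec_per_cyc is NSEC_PER_SEC/itc_freq in fixed point with
	 * IA64_NSEC_PER_CYC_SHIFT fractional bits; adding itc_freq/2
	 * before dividing rounds to nearest instead of truncating.
	 */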
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing.  If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option.  However, doing so may result in time fluctuating (maybe
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
		 * ITC values may fluctuate significantly between processors.
		 * The clock should not be used for hrtimers.  Mark itc as only
		 * useful for boot and testing.
		 *
		 * Note that jitter compensation is off!  There is no point in
		 * synchronizing ITCs since there may be large differentials
		 * that change over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs.  Until that time we have to avoid ITC.
		 */
		clocksource_itc.rating = 50;

	/* avoid a softlockup message when a CPU is unplugged and plugged in again */
	touch_softlockup_watchdog();

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		clocksource_register_hz(&clocksource_itc,
					local_cpu_data->itc_freq);
		itc_clocksource = &clocksource_itc;
	}
}

static u64 itc_get_cycles(struct clocksource *cs)
{
	unsigned long lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
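	/*
	 * If another CPU has already returned a later value, return that
	 * value again so the clock never appears to step backwards.
	 */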
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment we may lose the cmpxchg race; in that
	 * case cmpxchg returns the newer value that the winner of the
	 * race stored, so use that value instead.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}

static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	IRQF_IRQPOLL,
	.name =		"timer"
};

void read_persistent_clock64(struct timespec64 *ts)
{
	efi_gettimeofday(ts);
}

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	ia64_init_itm();
}

/*
 * Generic udelay assumes that, if preemption is allowed and the thread
 * migrates to another CPU, the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

void update_vsyscall(struct timekeeper *tk)
{
	write_seqcount_begin(&fsyscall_gtod_data.seq);

	/* copy vsyscall data */
	fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask;
	fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult;
	fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift;
	fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last;

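	/*
	 * The snsec fields hold nanoseconds shifted left by clk_shift,
	 * matching tkr_mono.xtime_nsec; the fsyscall reader is expected
	 * to shift right once after scaling the cycle delta by clk_mult.
	 */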
	fsyscall_gtod_data.wall_time.sec = tk->xtime_sec;
	fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec;

	fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec
		+ tk->wall_to_monotonic.tv_sec;
	fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec
		+ ((u64)tk->wall_to_monotonic.tv_nsec
			<< tk->tkr_mono.shift);

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.snsec >=
				(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		fsyscall_gtod_data.monotonic_time.snsec -=
				((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
		fsyscall_gtod_data.monotonic_time.sec++;
	}

	write_seqcount_end(&fsyscall_gtod_data.seq);
}