v3.1: linux/arch/ia64/kernel/time.c
/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/paravirt.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>

#include "fsyscall_gtod_data.h"

static cycle_t itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data = {
	.lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
};

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

#ifdef CONFIG_PARAVIRT
/* We need to define a real function for sched_clock, to override the
   weak default version */
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#endif

#ifdef CONFIG_PARAVIRT
static void
paravirt_clocksource_resume(struct clocksource *cs)
{
	if (pv_time_ops.clocksource_resume)
		pv_time_ops.clocksource_resume();
}
#endif

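/*
 * Rating 350 makes the ITC the preferred clocksource; ia64_init_itm()
 * demotes it to 50 on platforms that report ITC drift (see below).
 */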
static struct clocksource clocksource_itc = {
	.name           = "itc",
	.rating         = 350,
	.read           = itc_get_cycles,
	.mask           = CLOCKSOURCE_MASK(64),
	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
#ifdef CONFIG_PARAVIRT
	.resume		= paravirt_clocksource_resume,
#endif
};
static struct clocksource *itc_clocksource;

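/*
 * With CONFIG_VIRT_CPU_ACCOUNTING, user and system time are accumulated
 * as raw ITC cycles in thread_info (ac_utime/ac_stime, stamped via
 * ac_stamp) and only converted to cputime and flushed to the scheduler
 * at context switch, irq transitions and timer ticks.
 */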
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#include <linux/kernel_stat.h>

extern cputime_t cycle_to_cputime(u64 cyc);

/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(next);
	cputime_t delta_stime, delta_utime;
	__u64 now;

	now = ia64_get_itc();

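	/*
	 * ac_stime holds system-mode cycles accumulated so far; add the
	 * cycles elapsed since the last accounting stamp.
	 */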
	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
	if (idle_task(smp_processor_id()) != prev)
		account_system_time(prev, 0, delta_stime, delta_stime);
	else
		account_idle_time(delta_stime);

	if (pi->ac_utime) {
		delta_utime = cycle_to_cputime(pi->ac_utime);
		account_user_time(prev, delta_utime, delta_utime);
	}

	pi->ac_stamp = ni->ac_stamp = now;
	ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts enabled.
 */
void account_system_vtime(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned long flags;
	cputime_t delta_stime;
	__u64 now;

	local_irq_save(flags);

	now = ia64_get_itc();

	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
	if (irq_count() || idle_task(smp_processor_id()) != tsk)
		account_system_time(tsk, 0, delta_stime, delta_stime);
	else
		account_idle_time(delta_stime);
	ti->ac_stime = 0;

	ti->ac_stamp = now;

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

/*
 * Called from the timer interrupt handler to charge accumulated user time
 * to the current process.  Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	struct thread_info *ti = task_thread_info(p);
	cputime_t delta_utime;

	if (ti->ac_utime) {
		delta_utime = cycle_to_cputime(ti->ac_utime);
		account_user_time(p, delta_utime, delta_utime);
		ti->ac_utime = 0;
	}
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (cpu_is_offline(smp_processor_id())) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	if (paravirt_do_steal_accounting(&new_itm))
		goto skip_process_time_accounting;

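	/*
	 * Charge one tick's worth of process time per elapsed tick period,
	 * advancing new_itm until it is ahead of the current ITC value.
	 */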
	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id)
			xtime_update(1);

		local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

skip_process_time_accounting:

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing the monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
	if (cpu) {
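		/*
		 * hi is the largest power of two not exceeding cpu, so
		 * CPUs hi..2*hi-1 get shifts at odd multiples of
		 * delta/(2*hi), spreading their ticks evenly across the
		 * tick period.
		 */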
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}

static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk("Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);


void __devinit
ia64_init_itm (void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
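	/*
	 * nsec per ITC cycle in fixed point (scaled by
	 * 2^IA64_NSEC_PER_CYC_SHIFT), rounded to nearest:
	 */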
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing. If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option. However, doing so may result in time fluctuating (maybe
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
		 * ITC values may fluctuate significantly between processors.
		 * Clock should not be used for hrtimers. Mark itc as only
		 * useful for boot and testing.
		 *
		 * Note that jitter compensation is off!  There is no point in
		 * compensating, since the ITCs may differ by large offsets
		 * that change over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs. Until that time we have to avoid ITC.
		 */
		clocksource_itc.rating = 50;

	paravirt_init_missing_ticks_accounting(smp_processor_id());

	/* avoid softlockup messages when a CPU is unplugged and plugged in again */
	touch_softlockup_watchdog();

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		clocksource_register_hz(&clocksource_itc,
					local_cpu_data->itc_freq);
		itc_clocksource = &clocksource_itc;
	}
}

static cycle_t itc_get_cycles(struct clocksource *cs)
{
	unsigned long lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
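	/*
	 * If another CPU's ITC is ahead and has already published a later
	 * value, return that value so time never appears to go backward.
	 */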
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment we may lose the cmpxchg race; if so,
	 * cmpxchg returns the value the winner stored, so use that
	 * value instead of our own.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}


static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	IRQF_DISABLED | IRQF_IRQPOLL,
	.name =		"timer"
};

static struct platform_device rtc_efi_dev = {
	.name = "rtc-efi",
	.id = -1,
};

static int __init rtc_init(void)
{
	if (platform_device_register(&rtc_efi_dev) < 0)
		printk(KERN_ERR "unable to register rtc device...\n");

	/* not necessarily an error */
	return 0;
}
module_init(rtc_init);

void read_persistent_clock(struct timespec *ts)
{
	efi_gettimeofday(ts);
}

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	ia64_init_itm();
}

/*
 * Generic udelay assumes that if preemption is allowed and the thread
 * migrates to another CPU, that the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

void update_vsyscall(struct timespec *wall, struct timespec *wtm,
			struct clocksource *c, u32 mult)
{
	unsigned long flags;

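	/*
	 * Take the seqlock so the fsyscall gettimeofday path can detect a
	 * concurrent update and retry its reads.
	 */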
	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);

	/* copy fsyscall clock data */
	fsyscall_gtod_data.clk_mask = c->mask;
	fsyscall_gtod_data.clk_mult = mult;
	fsyscall_gtod_data.clk_shift = c->shift;
	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;

	/* copy kernel time structures */
	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
							+ wall->tv_sec;
	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
							+ wall->tv_nsec;

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
		fsyscall_gtod_data.monotonic_time.tv_sec++;
	}

	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}

v4.6: linux/arch/ia64/kernel/time.c
/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/timekeeper_internal.h>
#include <linux/platform_device.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>

#include "fsyscall_gtod_data.h"

static cycle_t itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data;

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

static struct clocksource clocksource_itc = {
	.name           = "itc",
	.rating         = 350,
	.read           = itc_get_cycles,
	.mask           = CLOCKSOURCE_MASK(64),
	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *itc_clocksource;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

#include <linux/kernel_stat.h>

extern cputime_t cycle_to_cputime(u64 cyc);

void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_utime;
	struct thread_info *ti = task_thread_info(tsk);

	if (ti->ac_utime) {
		delta_utime = cycle_to_cputime(ti->ac_utime);
		account_user_time(tsk, delta_utime, delta_utime);
		ti->ac_utime = 0;
	}
}

/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void arch_vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(current);

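	/*
	 * prev's accumulated time has already been flushed by the generic
	 * vtime code before this hook runs; just align the accounting
	 * stamps and reset the incoming task's accumulators.
	 */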
	pi->ac_stamp = ni->ac_stamp;
	ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function must be called with interrupts disabled (see the
 * WARN_ON_ONCE below).
 */
static cputime_t vtime_delta(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	cputime_t delta_stime;
	__u64 now;

	WARN_ON_ONCE(!irqs_disabled());

	now = ia64_get_itc();

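	/*
	 * ac_stime holds system-mode cycles accumulated so far; add the
	 * cycles elapsed since the last accounting stamp, then reset the
	 * accumulators.
	 */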
	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
	ti->ac_stime = 0;
	ti->ac_stamp = now;

	return delta_stime;
}

void vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta = vtime_delta(tsk);

	account_system_time(tsk, 0, delta, delta);
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
	account_idle_time(vtime_delta(tsk));
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (cpu_is_offline(smp_processor_id())) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id)
			xtime_update(1);

		local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing the monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}

static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk("Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);


void ia64_init_itm(void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing. If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option. However, doing so may result in time fluctuating (maybe
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
		 * ITC values may fluctuate significantly between processors.
		 * Clock should not be used for hrtimers. Mark itc as only
		 * useful for boot and testing.
		 *
		 * Note that jitter compensation is off!  There is no point in
		 * compensating, since the ITCs may differ by large offsets
		 * that change over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs. Until that time we have to avoid ITC.
		 */
		clocksource_itc.rating = 50;

	/* avoid softlockup messages when a CPU is unplugged and plugged in again */
	touch_softlockup_watchdog();

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		clocksource_register_hz(&clocksource_itc,
					local_cpu_data->itc_freq);
		itc_clocksource = &clocksource_itc;
	}
}

static cycle_t itc_get_cycles(struct clocksource *cs)
{
	unsigned long lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment we may lose the cmpxchg race; if so,
	 * cmpxchg returns the value the winner stored, so use that
	 * value instead of our own.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}


static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	IRQF_IRQPOLL,
	.name =		"timer"
};

void read_persistent_clock(struct timespec *ts)
{
	efi_gettimeofday(ts);
}

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	ia64_init_itm();
}

/*
 * Generic udelay assumes that if preemption is allowed and the thread
 * migrates to another CPU, that the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
			 struct clocksource *c, u32 mult, cycle_t cycle_last)
{
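	/*
	 * Open a seqcount write section so the fsyscall gettimeofday
	 * path can detect a concurrent update and retry its reads.
	 */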
	write_seqcount_begin(&fsyscall_gtod_data.seq);

	/* copy fsyscall clock data */
	fsyscall_gtod_data.clk_mask = c->mask;
	fsyscall_gtod_data.clk_mult = mult;
	fsyscall_gtod_data.clk_shift = c->shift;
	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = cycle_last;

	/* copy kernel time structures */
	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
							+ wall->tv_sec;
	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
							+ wall->tv_nsec;

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
		fsyscall_gtod_data.monotonic_time.tv_sec++;
	}

	write_seqcount_end(&fsyscall_gtod_data.seq);
}