// SPDX-License-Identifier: GPL-2.0-only
/*
 * apb_timer.c: Driver for Langwell APB timers
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Jacob Pan (jacob.jun.pan@intel.com)
 *
 * Note:
 * Langwell is the south complex of the Intel Moorestown MID platform. There
 * are eight external timers in total that can be used by the operating
 * system. The timer information, such as frequency and addresses, is
 * provided to the OS via SFI tables.
 * Timer interrupts are routed via a FW/HW emulated IOAPIC, independently via
 * individual redirection table entries (RTE).
 * Unlike HPET, there is no master counter; therefore one of the timers is
 * used as the clocksource. The overall allocation looks like:
 * - timers 0 .. NR_CPUs-1 for per cpu timers
 * - one timer for the clocksource
 * - one timer for the watchdog driver
 * It is also worth noting that the APB timer does not support true one-shot
 * mode; free-running mode is used here to emulate one-shot mode.
 * The APB timer can also be used as a broadcast timer along with the per cpu
 * local APIC timer, but by default the APB timer has a higher rating than
 * the local APIC timers.
 */

#include <linux/delay.h>
#include <linux/dw_apb_timer.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/sfi.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/irq.h>

#include <asm/fixmap.h>
#include <asm/apb_timer.h>
#include <asm/intel-mid.h>
#include <asm/time.h>

#define APBT_CLOCKEVENT_RATING		110
#define APBT_CLOCKSOURCE_RATING		250

#define APBT_CLOCKEVENT0_NUM	(0)
#define APBT_CLOCKSOURCE_NUM	(2)

static phys_addr_t apbt_address;
static int apb_timer_block_enabled;
static void __iomem *apbt_virt_address;

/*
 * Common DW APB timer info
 */
static unsigned long apbt_freq;

struct apbt_dev {
	struct dw_apb_clock_event_device *timer;
	unsigned int num;
	int cpu;
	unsigned int irq;
	char name[10];
};

static struct dw_apb_clocksource *clocksource_apbt;

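/*
 * Each timer occupies one APBTMRS_REG_SIZE register block; return the MMIO
 * base of the block that belongs to this device.
 */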
static inline void __iomem *adev_virt_addr(struct apbt_dev *adev)
{
	return apbt_virt_address + adev->num * APBTMRS_REG_SIZE;
}

static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);

#ifdef CONFIG_SMP
static unsigned int apbt_num_timers_used;
#endif

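/*
 * Map the APBT MMIO block described by the SFI MTMR table, record the timer
 * frequency, and create the clocksource on the timer reserved for it
 * (APBT_CLOCKSOURCE_NUM). Panics if the MMIO remap or the clocksource timer
 * lookup fails.
 */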
static inline void apbt_set_mapping(void)
{
	struct sfi_timer_table_entry *mtmr;
	int phy_cs_timer_id = 0;

	if (apbt_virt_address) {
		pr_debug("APBT base already mapped\n");
		return;
	}
	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
	if (mtmr == NULL) {
		printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
		       APBT_CLOCKEVENT0_NUM);
		return;
	}
	apbt_address = (phys_addr_t)mtmr->phys_addr;
	if (!apbt_address) {
		printk(KERN_WARNING "No timer base from SFI, use default\n");
		apbt_address = APBT_DEFAULT_BASE;
	}
	apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE);
	if (!apbt_virt_address) {
		pr_debug("Failed mapping APBT phy address at %lu\n",
			 (unsigned long)apbt_address);
		goto panic_noapbt;
	}
	apbt_freq = mtmr->freq_hz;
	sfi_free_mtmr(mtmr);

	/* Now figure out the physical timer id for clocksource device */
	mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM);
	if (mtmr == NULL)
		goto panic_noapbt;

	/* Now figure out the physical timer id */
	pr_debug("Use timer %d for clocksource\n",
		 (int)(mtmr->phys_addr & 0xff) / APBTMRS_REG_SIZE);
	phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff) /
		APBTMRS_REG_SIZE;

	clocksource_apbt = dw_apb_clocksource_init(APBT_CLOCKSOURCE_RATING,
		"apbt0", apbt_virt_address + phy_cs_timer_id *
		APBTMRS_REG_SIZE, apbt_freq);
	return;

panic_noapbt:
	panic("Failed to setup APB system timer\n");

}

static inline void apbt_clear_mapping(void)
{
	iounmap(apbt_virt_address);
	apbt_virt_address = NULL;
}

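/*
 * Register timer 0 as the boot CPU clockevent. When the LAPIC+APBT timer
 * mode (INTEL_MID_TIMER_LAPIC_APBT) is selected, the rating is lowered so
 * the local APIC timer wins and the APBT device is exported as the global
 * clock event instead.
 */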
static int __init apbt_clockevent_register(void)
{
	struct sfi_timer_table_entry *mtmr;
	struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev);

	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
	if (mtmr == NULL) {
		printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
		       APBT_CLOCKEVENT0_NUM);
		return -ENODEV;
	}

	adev->num = smp_processor_id();
	adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0",
		intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ?
		APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING,
		adev_virt_addr(adev), 0, apbt_freq);
	/* Firmware does EOI handling for us. */
	adev->timer->eoi = NULL;

	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
		global_clock_event = &adev->timer->ced;
		printk(KERN_DEBUG "%s clockevent registered as global\n",
		       global_clock_event->name);
	}

	dw_apb_clockevent_register(adev->timer);

	sfi_free_mtmr(mtmr);
	return 0;
}

#ifdef CONFIG_SMP

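/*
 * Mark the per cpu timer interrupt as movable from process context and pin
 * it to the CPU that owns this timer.
 */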
static void apbt_setup_irq(struct apbt_dev *adev)
{
	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
}

/* Should be called on each secondary CPU, in per cpu context */
void apbt_setup_secondary_clock(void)
{
	struct apbt_dev *adev;
	int cpu;

	/* Don't register boot CPU clockevent */
	cpu = smp_processor_id();
	if (!cpu)
		return;

	adev = this_cpu_ptr(&cpu_apbt_dev);
	if (!adev->timer) {
		adev->timer = dw_apb_clockevent_init(cpu, adev->name,
			APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
			adev->irq, apbt_freq);
		adev->timer->eoi = NULL;
	} else {
		dw_apb_clockevent_resume(adev->timer);
	}

	printk(KERN_INFO "Registering CPU %d clockevent device %s, cpu %08x\n",
	       cpu, adev->name, adev->cpu);

	apbt_setup_irq(adev);
	dw_apb_clockevent_register(adev->timer);
}

/*
 * This callback processes CPU hotplug events. In case of S0i3, nonboot cpus
 * are disabled/enabled frequently; for performance reasons, we keep the
 * per cpu timer irq registered so that we do not need to do
 * free_irq/request_irq.
 *
 * TODO: it might be more reliable to directly disable the percpu clockevent
 * device without the notifier chain. Currently, cpu 0 may get interrupts from
 * other cpu timers during the offline process due to the ordering of
 * notification. The extra interrupt is harmless.
 */
static int apbt_cpu_dead(unsigned int cpu)
{
	struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);

	dw_apb_clockevent_pause(adev->timer);
	if (system_state == SYSTEM_RUNNING) {
		pr_debug("skipping APBT CPU %u offline\n", cpu);
	} else {
		pr_debug("APBT clockevent for cpu %u offline\n", cpu);
		dw_apb_clockevent_stop(adev->timer);
	}
	return 0;
}

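/*
 * Register the CPU hotplug "dead" callback, but only when the APB timer
 * block is actually used for per cpu clockevents.
 */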
static __init int apbt_late_init(void)
{
	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
	    !apb_timer_block_enabled)
		return 0;
	return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "x86/apb:dead", NULL,
				 apbt_cpu_dead);
}
fs_initcall(apbt_late_init);
#else

void apbt_setup_secondary_clock(void) {}

#endif /* CONFIG_SMP */

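/*
 * Start the free-running clocksource timer, verify that it actually counts
 * by comparing two reads roughly 200000 TSC cycles apart, and then register
 * it with the clocksource framework.
 */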
static int apbt_clocksource_register(void)
{
	u64 start, now;
	u64 t1;

	/* Start the counter, use timer 2 as source, timer 0/1 for event */
	dw_apb_clocksource_start(clocksource_apbt);

	/* Verify whether apbt counter works */
	t1 = dw_apb_clocksource_read(clocksource_apbt);
	start = rdtsc();

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		now = rdtsc();
	} while ((now - start) < 200000UL);

	/* APBT is the only always-on clocksource, it has to work! */
	if (t1 == dw_apb_clocksource_read(clocksource_apbt))
		panic("APBT counter not counting. APBT disabled\n");

	dw_apb_clocksource_register(clocksource_apbt);

	return 0;
}

/*
 * Early setup of the APBT timer: only timer 0 is used for booting, then we
 * switch to per CPU timers if possible.
 * On success apb_timer_block_enabled is set; the function panics if setup
 * fails, since this is the only platform timer on Moorestown.
 */
void __init apbt_time_init(void)
{
#ifdef CONFIG_SMP
	int i;
	struct sfi_timer_table_entry *p_mtmr;
	struct apbt_dev *adev;
#endif

	if (apb_timer_block_enabled)
		return;
	apbt_set_mapping();
	if (!apbt_virt_address)
		goto out_noapbt;
	/*
	 * Read the frequency and check for a sane value; for the ESL model
	 * we extend the possible clock range to allow time scaling.
	 */

	if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
		pr_debug("APBT has invalid freq 0x%lx\n", apbt_freq);
		goto out_noapbt;
	}
	if (apbt_clocksource_register()) {
		pr_debug("APBT has failed to register clocksource\n");
		goto out_noapbt;
	}
	if (!apbt_clockevent_register())
		apb_timer_block_enabled = 1;
	else {
		pr_debug("APBT has failed to register clockevent\n");
		goto out_noapbt;
	}
#ifdef CONFIG_SMP
	/* kernel cmdline disabled the apb timer, so we will use lapic timers */
	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
		printk(KERN_INFO "apbt: disabled per cpu timer\n");
		return;
	}
	pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
	if (num_possible_cpus() <= sfi_mtimer_num)
		apbt_num_timers_used = num_possible_cpus();
	else
		apbt_num_timers_used = 1;
	pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);

	/* here we set up the per CPU timer data structures */
	for (i = 0; i < apbt_num_timers_used; i++) {
		adev = &per_cpu(cpu_apbt_dev, i);
		adev->num = i;
		adev->cpu = i;
		p_mtmr = sfi_get_mtmr(i);
		if (p_mtmr)
			adev->irq = p_mtmr->irq;
		else
			printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
		snprintf(adev->name, sizeof(adev->name) - 1, "apbt%d", i);
	}
#endif

	return;

out_noapbt:
	apbt_clear_mapping();
	apb_timer_block_enabled = 0;
	panic("failed to enable APB timer\n");
}

/* called before apb_timer_enable, use early map */
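/*
 * Quick TSC calibration against the APB timer. "loop" is the number of APBT
 * ticks in ~16 ms ((apbt_freq / 1000) << 4). With
 *	scale = (t2 - t1) / (loop >> shift)
 *	khz   = (scale * (apbt_freq / 1000)) >> shift
 * the shifts cancel and khz ~= (t2 - t1) / 16, i.e. TSC cycles per
 * millisecond, which is the TSC frequency in kHz. The shift only keeps the
 * intermediate division precise while staying in integer range.
 */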
unsigned long apbt_quick_calibrate(void)
{
	int i, scale;
	u64 old, new;
	u64 t1, t2;
	unsigned long khz = 0;
	u32 loop, shift;

	apbt_set_mapping();
	dw_apb_clocksource_start(clocksource_apbt);

	/* check if the timer can count down, otherwise return */
	old = dw_apb_clocksource_read(clocksource_apbt);
	i = 10000;
	while (--i) {
		if (old != dw_apb_clocksource_read(clocksource_apbt))
			break;
	}
	if (!i)
		goto failed;

	/* count 16 ms */
	loop = (apbt_freq / 1000) << 4;

	/* restart the timer to ensure it won't get to 0 in the calibration */
	dw_apb_clocksource_start(clocksource_apbt);

	old = dw_apb_clocksource_read(clocksource_apbt);
	old += loop;

	t1 = rdtsc();

	do {
		new = dw_apb_clocksource_read(clocksource_apbt);
	} while (new < old);

	t2 = rdtsc();

	shift = 5;
	if (unlikely(loop >> shift == 0)) {
		printk(KERN_INFO
		       "APBT TSC calibration failed, not enough resolution\n");
		return 0;
	}
	scale = (int)div_u64((t2 - t1), loop >> shift);
	khz = (scale * (apbt_freq / 1000)) >> shift;
	printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
	return khz;
failed:
	return 0;
}