/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/errno.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>

#include <clocksource/arm_arch_timer.h>

#ifdef CONFIG_ARM_ARCH_TIMER
int arch_timer_arch_init(void);

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3). See the
 * illustrative wrapper after arch_timer_reg_write_cp15() below.
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTP_CTL */
			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTP_TVAL */
			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
			break;
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTV_CTL */
			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTV_TVAL */
			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
			break;
		}
	}

	/* Make sure the register write has taken effect before continuing. */
	isb();
}
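
/*
 * For illustration only: since the access type and register are always
 * compile-time constants, a (hypothetical) wrapper like the one below
 * should reduce to the single CNTV_CTL write plus the trailing isb(),
 * exactly as the comment above describes.
 */
static inline void arch_timer_example_enable_virt(void)
{
	/* CNTV_CTL: set bit 0 (ENABLE), leave bit 1 (IMASK) clear. */
	arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
				  ARCH_TIMER_REG_CTRL, 1);
}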

static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
	u32 val = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTP_CTL */
			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTP_TVAL */
			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
			break;
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			/* CNTV_CTL */
			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			/* CNTV_TVAL */
			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
			break;
		}
	}

	return val;
}

/* CNTFRQ: the generic timer/counter frequency in Hz. */
static inline u32 arch_timer_get_cntfrq(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
	return val;
}

/* CNTPCT: the 64-bit physical counter. */
static inline u64 arch_counter_get_cntpct(void)
{
	u64 cval;

	/* Ensure the counter read is not speculated ahead of earlier code. */
	isb();
	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}

/* CNTVCT: the 64-bit virtual counter (CNTPCT minus CNTVOFF). */
static inline u64 arch_counter_get_cntvct(void)
{
	u64 cval;

	isb();
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}
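
/*
 * Hypothetical sketch of how the counter reads are typically used: two
 * samples bracket the code being timed, the isb() in each accessor keeps
 * the samples ordered with respect to that code, and the (wrap-safe,
 * unsigned) difference is in ticks of the frequency returned by
 * arch_timer_get_cntfrq().
 */
static inline u64 arch_counter_example_ticks_since(u64 start)
{
	return arch_counter_get_cntvct() - start;
}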

/* CNTKCTL: controls user-space (PL0) access to the timers and counters. */
static inline u32 arch_timer_get_cntkctl(void)
{
	u32 cntkctl;
	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
	return cntkctl;
}

static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
	isb();
}
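
/*
 * Hypothetical example of the usual read-modify-write pattern on CNTKCTL:
 * bit 1 (PL0VCTEN) grants user space read access to the virtual counter.
 * The helper name and the open-coded bit are for illustration only.
 */
static inline void arch_timer_example_allow_user_vct(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	arch_timer_set_cntkctl(cntkctl | (1U << 1));	/* PL0VCTEN */
}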

#endif	/* CONFIG_ARM_ARCH_TIMER */

#endif	/* __ASMARM_ARCH_TIMER_H */