/*
 * Copyright (C) 2018 ARM Limited
 * Copyright (C) 2015 Imagination Technologies
 * Author: Alex Smith <alex.smith@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/vdso/vdso.h>
#include <asm/clocksource.h>
#include <asm/unistd.h>
#include <asm/vdso.h>

#define VDSO_HAS_CLOCK_GETRES 1

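/*
 * Before MIPS R6 a syscall may clobber the hi/lo multiply/divide
 * registers, so they are added to the clobber lists below; R6 removed
 * hi/lo, so nothing extra needs to be clobbered there.
 */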
#if MIPS_ISA_REV < 6
#define VDSO_SYSCALL_CLOBBERS "hi", "lo",
#else
#define VDSO_SYSCALL_CLOBBERS
#endif

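/*
 * The fallbacks below use the MIPS syscall convention: arguments in
 * a0/a1, the syscall number in v0 (which also carries the result back,
 * hence "nr" and "ret" sharing the register) and an error flag in a3.
 * On failure the kernel returns a positive errno with a3 set, which is
 * why each helper returns "error ? -ret : ret".
 */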
static __always_inline long gettimeofday_fallback(
                                struct __kernel_old_timeval *_tv,
                                struct timezone *_tz)
{
        register struct timezone *tz asm("a1") = _tz;
        register struct __kernel_old_timeval *tv asm("a0") = _tv;
        register long ret asm("v0");
        register long nr asm("v0") = __NR_gettimeofday;
        register long error asm("a3");

        asm volatile(
        "       syscall\n"
        : "=r" (ret), "=r" (error)
        : "r" (tv), "r" (tz), "r" (nr)
        : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
          "$14", "$15", "$24", "$25",
          VDSO_SYSCALL_CLOBBERS
          "memory");

        return error ? -ret : ret;
}

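/*
 * The generic vDSO works with 64-bit time even on 32-bit ABIs, so o32
 * and n32 fall back to the *_time64 syscalls, while n64 uses the native
 * clock_gettime/clock_getres syscalls.
 */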
static __always_inline long clock_gettime_fallback(
                                clockid_t _clkid,
                                struct __kernel_timespec *_ts)
{
        register struct __kernel_timespec *ts asm("a1") = _ts;
        register clockid_t clkid asm("a0") = _clkid;
        register long ret asm("v0");
#if _MIPS_SIM == _MIPS_SIM_ABI64
        register long nr asm("v0") = __NR_clock_gettime;
#else
        register long nr asm("v0") = __NR_clock_gettime64;
#endif
        register long error asm("a3");

        asm volatile(
        "       syscall\n"
        : "=r" (ret), "=r" (error)
        : "r" (clkid), "r" (ts), "r" (nr)
        : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
          "$14", "$15", "$24", "$25",
          VDSO_SYSCALL_CLOBBERS
          "memory");

        return error ? -ret : ret;
}

static __always_inline int clock_getres_fallback(
                                clockid_t _clkid,
                                struct __kernel_timespec *_ts)
{
        register struct __kernel_timespec *ts asm("a1") = _ts;
        register clockid_t clkid asm("a0") = _clkid;
        register long ret asm("v0");
#if _MIPS_SIM == _MIPS_SIM_ABI64
        register long nr asm("v0") = __NR_clock_getres;
#else
        register long nr asm("v0") = __NR_clock_getres_time64;
#endif
        register long error asm("a3");

        asm volatile(
        "       syscall\n"
        : "=r" (ret), "=r" (error)
        : "r" (clkid), "r" (ts), "r" (nr)
        : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
          "$14", "$15", "$24", "$25",
          VDSO_SYSCALL_CLOBBERS
          "memory");

        return error ? -ret : ret;
}

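/*
 * The 32-bit ABIs additionally provide fallbacks taking the legacy
 * 32-bit timespec, used by the generic vDSO for the old 32-bit time
 * entry points.
 */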
#if _MIPS_SIM != _MIPS_SIM_ABI64

static __always_inline long clock_gettime32_fallback(
                                clockid_t _clkid,
                                struct old_timespec32 *_ts)
{
        register struct old_timespec32 *ts asm("a1") = _ts;
        register clockid_t clkid asm("a0") = _clkid;
        register long ret asm("v0");
        register long nr asm("v0") = __NR_clock_gettime;
        register long error asm("a3");

        asm volatile(
        "       syscall\n"
        : "=r" (ret), "=r" (error)
        : "r" (clkid), "r" (ts), "r" (nr)
        : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
          "$14", "$15", "$24", "$25",
          VDSO_SYSCALL_CLOBBERS
          "memory");

        return error ? -ret : ret;
}

static __always_inline int clock_getres32_fallback(
                                clockid_t _clkid,
                                struct old_timespec32 *_ts)
{
        register struct old_timespec32 *ts asm("a1") = _ts;
        register clockid_t clkid asm("a0") = _clkid;
        register long ret asm("v0");
        register long nr asm("v0") = __NR_clock_getres;
        register long error asm("a3");

        asm volatile(
        "       syscall\n"
        : "=r" (ret), "=r" (error)
        : "r" (clkid), "r" (ts), "r" (nr)
        : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
          "$14", "$15", "$24", "$25",
          VDSO_SYSCALL_CLOBBERS
          "memory");

        return error ? -ret : ret;
}
#endif

#ifdef CONFIG_CSRC_R4K

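/*
 * RDHWR $2 reads the CP0 Count register via the user-visible CC
 * hardware register, which the kernel exposes to user mode when the
 * R4K counter is usable as a clocksource.
 */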
static __always_inline u64 read_r4k_count(void)
{
        unsigned int count;

        __asm__ __volatile__(
        "       .set push\n"
        "       .set mips32r2\n"
        "       rdhwr %0, $2\n"
        "       .set pop\n"
        : "=r" (count));

        return count;
}

#endif

#ifdef CONFIG_CLKSRC_MIPS_GIC

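/*
 * The GIC shared counter is a 64-bit memory-mapped counter whose
 * user-readable page is mapped next to the vDSO data (see get_gic()).
 * It is read as two 32-bit halves, re-reading the high word until it
 * is stable so the snapshot is consistent across a low-word rollover.
 */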
static __always_inline u64 read_gic_count(const struct vdso_data *data)
{
        void __iomem *gic = get_gic(data);
        u32 hi, hi2, lo;

        do {
                hi = __raw_readl(gic + sizeof(lo));
                lo = __raw_readl(gic);
                hi2 = __raw_readl(gic + sizeof(lo));
        } while (hi2 != hi);

        return (((u64)hi) << 32) + lo;
}

#endif

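/*
 * Called by the generic vDSO code with the clock_mode published by the
 * current clocksource; return the matching raw counter value.
 */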
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
                                                 const struct vdso_data *vd)
{
#ifdef CONFIG_CSRC_R4K
        if (clock_mode == VDSO_CLOCKMODE_R4K)
                return read_r4k_count();
#endif
#ifdef CONFIG_CLKSRC_MIPS_GIC
        if (clock_mode == VDSO_CLOCKMODE_GIC)
                return read_gic_count(vd);
#endif
        /*
         * The core vDSO code has already checked the clock mode, so getting
         * here means we raced against a concurrent update. Return anything;
         * the core code will do another round, see the change and fall back
         * to the syscall.
         */
        return 0;
}

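/*
 * Without one of the supported counters compiled in, high resolution
 * clocks cannot be served from the vDSO and the generic code falls
 * back to the syscall.
 */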
static inline bool mips_vdso_hres_capable(void)
{
        return IS_ENABLED(CONFIG_CSRC_R4K) ||
               IS_ENABLED(CONFIG_CLKSRC_MIPS_GIC);
}
#define __arch_vdso_hres_capable mips_vdso_hres_capable

static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
        return get_vdso_data();
}

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */