xref: /openbmc/linux/arch/sparc/vdso/vclock_gettime.c (revision e3b9f1e8)
1 /*
2  * Copyright 2006 Andi Kleen, SUSE Labs.
3  * Subject to the GNU Public License, v.2
4  *
5  * Fast user context implementation of clock_gettime, gettimeofday, and time.
6  *
7  * The code should have no internal unresolved relocations.
8  * Check with readelf after changing.
9  * Also alternative() doesn't work.
10  */
11 /*
12  * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
13  */
14 
15 /* Disable profiling for userspace code: */
16 #ifndef	DISABLE_BRANCH_PROFILING
17 #define	DISABLE_BRANCH_PROFILING
18 #endif
19 
20 #include <linux/kernel.h>
21 #include <linux/time.h>
22 #include <linux/string.h>
23 #include <asm/io.h>
24 #include <asm/unistd.h>
25 #include <asm/timex.h>
26 #include <asm/clocksource.h>
27 #include <asm/vvar.h>
28 
/*
 * Mask for bit 63 of the %tick/%stick register value.  This bit is the
 * privileged (NPT) bit, not part of the cycle count, so readers strip it.
 */
#undef	TICK_PRIV_BIT
#ifdef	CONFIG_SPARC64
#define	TICK_PRIV_BIT	(1UL << 63)
#else
/* 32-bit build: unsigned long is 32 bits, so a 64-bit literal is required */
#define	TICK_PRIV_BIT	(1ULL << 63)
#endif
35 
/*
 * Trap into the kernel with software trap 0x6d and convert the result to
 * the userland convention.  On SPARC the kernel reports failure by setting
 * the carry flag and leaving the positive errno in %o0, so negate %o0 to
 * -errno only when carry is set (bcs,a annuls the sub on the success path).
 * The previous unconditional negation only worked because clock_gettime()
 * and gettimeofday() return 0 on success; any positive success value would
 * have been corrupted.
 */
#define SYSCALL_STRING							\
	"ta	0x6d;"							\
	"bcs,a	1f;"							\
	" sub	%%g0, %%o0, %%o0;"					\
	"1:"
/*
 * Registers the kernel may clobber across the syscall trap, beyond the
 * in/out registers named in the asm constraints: all FP registers (f0-f31
 * singles, then the even-numbered double-precision halves f32-f62), the
 * condition codes and memory.
 */
#define SYSCALL_CLOBBERS						\
	"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",			\
	"f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",		\
	"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",		\
	"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",		\
	"f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",		\
	"f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",		\
	"cc", "memory"
48 
49 /*
50  * Compute the vvar page's address in the process address space, and return it
51  * as a pointer to the vvar_data.
52  */
53 static notrace noinline struct vvar_data *
54 get_vvar_data(void)
55 {
56 	unsigned long ret;
57 
58 	/*
59 	 * vdso data page is the first vDSO page so grab the return address
60 	 * and move up a page to get to the data page.
61 	 */
62 	ret = (unsigned long)__builtin_return_address(0);
63 	ret &= ~(8192 - 1);
64 	ret -= 8192;
65 
66 	return (struct vvar_data *) ret;
67 }
68 
69 static notrace long
70 vdso_fallback_gettime(long clock, struct timespec *ts)
71 {
72 	register long num __asm__("g1") = __NR_clock_gettime;
73 	register long o0 __asm__("o0") = clock;
74 	register long o1 __asm__("o1") = (long) ts;
75 
76 	__asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
77 			     "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
78 	return o0;
79 }
80 
/*
 * Syscall fallback for gettimeofday(): traps into the kernel with the
 * syscall number in %g1 and the two arguments in %o0/%o1, returning the
 * (sign-normalized) result from %o0.
 */
static notrace __always_inline long
vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	register long num __asm__("g1") = __NR_gettimeofday;
	register long o0 __asm__("o0") = (long) tv;
	register long o1 __asm__("o1") = (long) tz;

	__asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
			     "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
	return o0;
}
92 
#ifdef	CONFIG_SPARC64
/*
 * Read the cycle counter.  The default instruction reads %asr24 (%stick);
 * the .vread_tick_patch section carries an alternative "rd %tick" that is
 * presumably patched in at vDSO setup time on CPUs that should use %tick
 * instead -- confirm against the kernel's vDSO patching code.
 */
static notrace noinline u64
vread_tick(void) {
	u64	ret;

	__asm__ __volatile__("rd	%%asr24, %0 \n"
			     ".section	.vread_tick_patch, \"ax\" \n"
			     "rd	%%tick, %0 \n"
			     ".previous \n"
			     : "=&r" (ret));
	/* Bit 63 is the privileged (NPT) bit, not part of the count. */
	return ret & ~TICK_PRIV_BIT;
}
#else
/*
 * 32-bit variant: the 64-bit counter is read into %g1 and split into
 * 32-bit halves, since a single register cannot hold it here.
 * NOTE(review): unlike the 64-bit path, TICK_PRIV_BIT is not masked off
 * in this variant -- verify whether the high bit can be set in this mode.
 */
static notrace noinline u64
vread_tick(void)
{
	unsigned int lo, hi;

	__asm__ __volatile__("rd	%%asr24, %%g1\n\t"
			     "srlx	%%g1, 32, %1\n\t"
			     "srl	%%g1, 0, %0\n"
			     ".section	.vread_tick_patch, \"ax\" \n"
			     "rd	%%tick, %%g1\n"
			     ".previous \n"
			     : "=&r" (lo), "=&r" (hi)
			     :
			     : "g1");
	return lo | ((u64)hi << 32);
}
#endif
123 
124 static notrace inline u64
125 vgetsns(struct vvar_data *vvar)
126 {
127 	u64 v;
128 	u64 cycles;
129 
130 	cycles = vread_tick();
131 	v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
132 	return v * vvar->clock.mult;
133 }
134 
/*
 * CLOCK_REALTIME fast path: snapshot wall time from the vvar page plus the
 * cycles elapsed since the kernel last updated it.  Always returns 0.
 */
static notrace noinline int
do_realtime(struct vvar_data *vvar, struct timespec *ts)
{
	unsigned long seq;
	u64 ns;

	/* timespec_add_ns() below accumulates into tv_nsec; start from 0. */
	ts->tv_nsec = 0;
	do {
		/*
		 * Seqcount read section: all vvar reads must happen between
		 * begin and retry, and are redone if the kernel updated the
		 * page concurrently.
		 */
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->wall_time_sec;
		ns = vvar->wall_time_snsec;
		ns += vgetsns(vvar);
		ns >>= vvar->clock.shift;
	} while (unlikely(vvar_read_retry(vvar, seq)));

	/* Normalize: fold whole seconds from ns into tv_sec. */
	timespec_add_ns(ts, ns);

	return 0;
}
154 
/*
 * CLOCK_MONOTONIC fast path: identical structure to do_realtime() but
 * based on the monotonic time fields of the vvar page.  Always returns 0.
 */
static notrace noinline int
do_monotonic(struct vvar_data *vvar, struct timespec *ts)
{
	unsigned long seq;
	u64 ns;

	/* timespec_add_ns() below accumulates into tv_nsec; start from 0. */
	ts->tv_nsec = 0;
	do {
		/* Seqcount read section; retried on concurrent update. */
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->monotonic_time_sec;
		ns = vvar->monotonic_time_snsec;
		ns += vgetsns(vvar);
		ns >>= vvar->clock.shift;
	} while (unlikely(vvar_read_retry(vvar, seq)));

	/* Normalize: fold whole seconds from ns into tv_sec. */
	timespec_add_ns(ts, ns);

	return 0;
}
174 
/*
 * CLOCK_REALTIME_COARSE: tick-granularity wall time, copied straight from
 * the vvar page without touching the cycle counter.  Always returns 0.
 */
static notrace noinline int
do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts)
{
	unsigned long seq;

	do {
		/* Seqcount read section; retried on concurrent update. */
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->wall_time_coarse_sec;
		ts->tv_nsec = vvar->wall_time_coarse_nsec;
	} while (unlikely(vvar_read_retry(vvar, seq)));
	return 0;
}
187 
/*
 * CLOCK_MONOTONIC_COARSE: tick-granularity monotonic time, copied straight
 * from the vvar page without touching the cycle counter.  Always returns 0.
 */
static notrace noinline int
do_monotonic_coarse(struct vvar_data *vvar, struct timespec *ts)
{
	unsigned long seq;

	do {
		/* Seqcount read section; retried on concurrent update. */
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->monotonic_time_coarse_sec;
		ts->tv_nsec = vvar->monotonic_time_coarse_nsec;
	} while (unlikely(vvar_read_retry(vvar, seq)));

	return 0;
}
201 
202 notrace int
203 __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
204 {
205 	struct vvar_data *vvd = get_vvar_data();
206 
207 	switch (clock) {
208 	case CLOCK_REALTIME:
209 		if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
210 			break;
211 		return do_realtime(vvd, ts);
212 	case CLOCK_MONOTONIC:
213 		if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
214 			break;
215 		return do_monotonic(vvd, ts);
216 	case CLOCK_REALTIME_COARSE:
217 		return do_realtime_coarse(vvd, ts);
218 	case CLOCK_MONOTONIC_COARSE:
219 		return do_monotonic_coarse(vvd, ts);
220 	}
221 	/*
222 	 * Unknown clock ID ? Fall back to the syscall.
223 	 */
224 	return vdso_fallback_gettime(clock, ts);
225 }
226 int
227 clock_gettime(clockid_t, struct timespec *)
228 	__attribute__((weak, alias("__vdso_clock_gettime")));
229 
/*
 * vDSO entry point for gettimeofday().  Uses the CLOCK_REALTIME fast path
 * when a vclock is available, converting nanoseconds to microseconds in
 * place; otherwise falls back to the syscall.
 */
notrace int
__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	struct vvar_data *vvd = get_vvar_data();

	if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
		if (likely(tv != NULL)) {
			/*
			 * Pun the caller's timeval as a timespec so
			 * do_realtime() can fill it directly; relies on the
			 * two structs sharing the layout of tv_sec and of
			 * tv_nsec/tv_usec on this ABI.
			 */
			union tstv_t {
				struct timespec ts;
				struct timeval tv;
			} *tstv = (union tstv_t *) tv;
			do_realtime(vvd, &tstv->ts);
			/*
			 * Assign before dividing to ensure that the division is
			 * done in the type of tv_usec, not tv_nsec.
			 *
			 * There cannot be > 1 billion usec in a second:
			 * do_realtime() has already distributed such overflow
			 * into tv_sec.  So we can assign it to an int safely.
			 */
			tstv->tv.tv_usec = tstv->ts.tv_nsec;
			tstv->tv.tv_usec /= 1000;
		}
		if (unlikely(tz != NULL)) {
			/* Avoid memcpy. Some old compilers fail to inline it */
			tz->tz_minuteswest = vvd->tz_minuteswest;
			tz->tz_dsttime = vvd->tz_dsttime;
		}
		return 0;
	}
	return vdso_fallback_gettimeofday(tv, tz);
}
/* Weak alias so the dynamic linker exposes the unprefixed symbol too. */
int
gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
265