/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */
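/*
 * For example, assuming the vDSO image built from this directory is
 * vdso.so (the exact name and path depend on the tree):
 *
 *	readelf -r vdso.so
 *
 * should report that the file contains no relocation entries.
 */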
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>
#include <asm/trace-clock.h>
#include <asm/timer.h>
#define gtod (&VVAR(vsyscall_gtod_data))
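/*
 * VVAR() resolves to the kernel's vsyscall_gtod_data through the read-only
 * vvar mapping visible to userspace, so every gtod-> access below is a
 * plain load from a fixed address rather than a syscall.
 */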
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
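{
	/*
	 * A minimal sketch of the fallback body, assuming the standard
	 * x86-64 syscall convention (number in rax, arguments in rdi/rsi)
	 * and the same clobber list as time_syscall() below.
	 */
	long ret;
	asm volatile("syscall"
		     : "=a" (ret)
		     : "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
		     : "cc", "r11", "cx", "memory");
	return ret;
}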
/* Copy of the version in kernel/time.c which we cannot directly access */
notrace static void
vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
{
	while (nsec >= NSEC_PER_SEC) {
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {
		nsec += NSEC_PER_SEC;
		--sec;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}
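/*
 * The local copy exists because the vDSO cannot call into kernel text:
 * referencing the kernel's set_normalized_timespec() would create exactly
 * the kind of unresolved relocation the header comment warns about.
 */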
notrace static noinline int do_monotonic(struct timespec *ts)
{
	unsigned long seq, ns, secs;
	do {
		seq = read_seqbegin(&gtod->lock);
		secs = gtod->wall_time_sec;
		ns = gtod->wall_time_nsec + vgetns();
		secs += gtod->wall_to_monotonic.tv_sec;
		ns += gtod->wall_to_monotonic.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));
	/*
	 * wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
	 * are all guaranteed to be nonnegative.
	 */
	vset_normalized_timespec(ts, secs, ns);
	return 0;
}
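/*
 * For reference, a sketch of the scaling vgetns() performs, assuming the
 * usual clocksource mult/shift conversion (field names as in
 * vsyscall_gtod_data):
 *
 *	v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
 *	return (v * gtod->clock.mult) >> gtod->clock.shift;
 */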
notrace static noinline int do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq, ns, secs;
	do {
		seq = read_seqbegin(&gtod->lock);
		secs = gtod->wall_time_coarse.tv_sec;
		ns = gtod->wall_time_coarse.tv_nsec;
		secs += gtod->wall_to_monotonic.tv_sec;
		ns += gtod->wall_to_monotonic.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));
	/*
	 * wall_time_nsec and wall_to_monotonic.tv_nsec are
	 * guaranteed to be between 0 and NSEC_PER_SEC.
	 */
	vset_normalized_timespec(ts, secs, ns);
	return 0;
}
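/*
 * The *_COARSE variants return the value cached at the last timer tick and
 * never touch the clocksource, trading resolution for an even cheaper read.
 */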
/*
 * If the TSC is synchronized across all CPUs, read the current TSC
 * and export its value in the nsec field of the timespec
 */
notrace static noinline int do_trace_clock(struct timespec *ts)
{
	unsigned long seq;
	union lttng_timespec *lts = (union lttng_timespec *) ts;

	do {
		seq = read_seqbegin(&gtod->lock);
		if (unlikely(!gtod->trace_clock_is_sync))
			return vdso_fallback_gettime(CLOCK_TRACE, ts);
		/*
		 * We don't protect the rdtsc with the rdtsc_barrier because
		 * we can't obtain that level of precision with tracing anyway.
		 * Recording an event is not an atomic operation, so the small
		 * chance of imprecision doesn't justify the overhead of the
		 * barrier.
		 */
		/*
		 * TODO: check that vget_cycles(), using paravirt ops, will
		 * match the TSC read by get_cycles() at the kernel level.
		 */
		lts->lttng_ts = vget_cycles();
	} while (unlikely(read_seqretry(&gtod->lock, seq)));

	return 0;
}

/*
 * Returns cpu_khz; it needs to be a syscall because we can't access this
 * value from userspace, and it will only be called at the beginning of the
 * tracing session.
 */
notrace static noinline int do_trace_clock_freq(struct timespec *ts)
{
	return vdso_fallback_gettime(CLOCK_TRACE_FREQ, ts);
}
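/*
 * Usage sketch (illustration only, assuming the CLOCK_TRACE and
 * CLOCK_TRACE_FREQ ids are exported to userspace by this patch set and
 * that libc routes clock_gettime() through the vDSO):
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_TRACE_FREQ, &ts);	   once per session (syscall)
 *	clock_gettime(CLOCK_TRACE, &ts);	   per event (vDSO fast path)
 */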
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	if (likely(gtod->sysctl_enabled))
		switch (clock) {
		case CLOCK_REALTIME:
			return do_realtime(ts);
		case CLOCK_MONOTONIC:
			return do_monotonic(ts);
		case CLOCK_REALTIME_COARSE:
			return do_realtime_coarse(ts);
		case CLOCK_MONOTONIC_COARSE:
			return do_monotonic_coarse(ts);
		case CLOCK_TRACE:
			return do_trace_clock(ts);
		case CLOCK_TRACE_FREQ:
			return do_trace_clock_freq(ts);
		}
	return vdso_fallback_gettime(clock, ts);
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
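/*
 * Each exported function appears twice in the vDSO: under its __vdso_ name
 * and, as above, under the plain libc name as a weak alias, so either
 * symbol lookup lands in the same code.
 */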
/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely */
static __always_inline long time_syscall(long *t)
{
	long secs;
	asm volatile("syscall"
		     : "=a" (secs)
		     : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
	return secs;
}

notrace time_t __vdso_time(time_t *t)
{
	time_t result;

	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
		return time_syscall(t);

	/* This is atomic on x86_64 so we don't need any locks. */
	result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);

	if (t)
		*t = result;
	return result;
}

time_t time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));