/*
 * If the TSC is synchronized across all CPUs, read the current TSC
 * and export its value in the nsec field of the timespec.
 */
notrace static noinline int do_trace_clock(struct timespec *ts)
{
        unsigned long seq;
        union lttng_timespec *lts = (union lttng_timespec *) ts;

        do {
                seq = read_seqbegin(&gtod->lock);
                if (unlikely(!gtod->trace_clock_is_sync))
                        return vdso_fallback_gettime(CLOCK_TRACE, ts);
                /*
                 * We don't protect the rdtsc with the rdtsc_barrier
                 * because tracing cannot attain that level of precision
                 * anyway: recording an event is not an atomic operation,
                 * so the small chance of imprecision doesn't justify the
                 * overhead of a barrier.
                 */
                /*
                 * TODO: check that vget_cycles(), using paravirt ops, will
                 * match the TSC read by get_cycles() at the kernel level.
                 */
                lts->lttng_ts = vget_cycles();
        } while (unlikely(read_seqretry(&gtod->lock, seq)));

        return 0;
}
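From userspace, the trace clock is read through the ordinary clock_gettime() interface, and the raw cycle count is recovered through the same timespec overlay. The sketch below mirrors the patch's union lttng_timespec convention; the numeric value chosen for CLOCK_TRACE is hypothetical, and the real constant comes from the patched headers.

        #include <stdio.h>
        #include <stdint.h>
        #include <time.h>

        #define CLOCK_TRACE 13  /* hypothetical value; use the patched headers */

        /* Userspace mirror of the kernel's union lttng_timespec overlay. */
        union lttng_timespec {
                struct timespec ts;
                uint64_t lttng_ts;      /* raw TSC cycle count */
        };

        int main(void)
        {
                union lttng_timespec lts;

                /* Fast path: resolved in the vDSO; no syscall as long as
                 * the TSC is synchronized across CPUs. */
                if (clock_gettime(CLOCK_TRACE, &lts.ts) < 0) {
                        perror("clock_gettime(CLOCK_TRACE)");
                        return 1;
                }
                printf("trace clock: %llu cycles\n",
                       (unsigned long long) lts.lttng_ts);
                return 0;
        }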
/*
 * Returns cpu_khz; this needs to be a syscall because the value cannot
 * be read from userspace, and it is only called at the beginning of the
 * tracing session.
 */
notrace static noinline int do_trace_clock_freq(struct timespec *ts)
{
        return vdso_fallback_gettime(CLOCK_TRACE_FREQ, ts);
}
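A tracer would typically issue this call once at session start, cache the result, and use it to convert cycle deltas into nanoseconds. The sketch below reuses the union lttng_timespec mirror from the previous sketch and assumes cpu_khz comes back through the same overlay; which field actually carries it is an assumption, not something this excerpt shows, and the CLOCK_TRACE_FREQ value is likewise hypothetical.

        #define CLOCK_TRACE_FREQ 14  /* hypothetical value; use the patched headers */

        /* Query the trace clock frequency once, at session start (syscall path). */
        static uint64_t trace_clock_khz(void)
        {
                union lttng_timespec lts;   /* as defined in the previous sketch */

                if (clock_gettime(CLOCK_TRACE_FREQ, &lts.ts) < 0)
                        return 0;
                return lts.lttng_ts;        /* assumed: cpu_khz via the overlay */
        }

        /* Convert a cycle delta to nanoseconds: ns = cycles * 10^6 / cpu_khz.
         * Plain division is fine for short deltas; a real tracer would use a
         * scaled multiplication to avoid overflow on long-running sessions. */
        static uint64_t cycles_to_ns(uint64_t cycles, uint64_t khz)
        {
                return cycles * 1000000ULL / khz;
        }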
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
        if (likely(gtod->sysctl_enabled))
                switch (clock) {
                case CLOCK_REALTIME:
                        if (likely(gtod->clock.vread))
                                return do_realtime(ts);
                        break;
                case CLOCK_MONOTONIC:
                        if (likely(gtod->clock.vread))
                                return do_monotonic(ts);
                        break;
                case CLOCK_REALTIME_COARSE:
                        return do_realtime_coarse(ts);
                case CLOCK_MONOTONIC_COARSE:
                        return do_monotonic_coarse(ts);
                case CLOCK_TRACE:
                        return do_trace_clock(ts);
                case CLOCK_TRACE_FREQ:
                        return do_trace_clock_freq(ts);
                }
        return vdso_fallback_gettime(clock, ts);
}
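For reference, vdso_fallback_gettime(), used on every slow path above, is the small helper from the upstream x86-64 vclock_gettime.c that issues the real clock_gettime system call directly; it looks essentially like this:

        notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
        {
                long ret;

                /* Direct syscall: __NR_clock_gettime in rax, clock in rdi,
                 * ts in rsi; the return value comes back in rax. */
                asm("syscall" : "=a" (ret) :
                    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
                return ret;
        }

This pass-through is what makes CLOCK_TRACE_FREQ work: the clockid reaches the kernel unchanged, where the patched in-kernel handler can fill in cpu_khz.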