62
45
/* Set once the trace backend has been initialized and may accept events. */
static bool trace_available;
63
45
/* True while the writeout thread is permitted to flush records to disk. */
static bool trace_writeout_enabled;
65
/* NOTE(review): two conflicting trace_buf declarations appear in this
 * chunk -- a fixed-size TraceRecord array (older layout) and a raw byte
 * ring (variable-length layout).  Only one can be kept; confirm which
 * version this file is meant to carry.  The bare numeric lines look like
 * pasted-in line numbers from a diff. */
static TraceRecord trace_buf[TRACE_BUF_LEN];
49
/* Ring capacity in bytes, and the fill level that triggers a flush
 * (one quarter of the buffer). */
TRACE_BUF_LEN = 4096 * 64,
50
TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
53
uint8_t trace_buf[TRACE_BUF_LEN];
66
54
/* Next free write position; monotonically increasing, reduced modulo
 * TRACE_BUF_LEN only when indexing into trace_buf. */
static unsigned int trace_idx;
55
/* Position up to which the writeout thread has consumed records. */
static unsigned int writeout_idx;
56
/* Number of events dropped because the ring buffer was full. */
static uint64_t dropped_events;
67
57
/* Output stream for the trace log; NULL while tracing to file is off. */
static FILE *trace_fp;
68
58
/* Path of the trace log file -- presumably set by an st_set_trace_file()
 * style setter; TODO confirm, the setter is not visible in this chunk. */
static char *trace_file_name = NULL;
60
/* * Trace buffer entry */
62
/* NOTE(review): the enclosing struct declarations are missing from this
 * chunk.  The first four fields match the on-disk/in-ring record header
 * (TraceRecord); the three header_* fields match the log file header
 * written once at the start of the trace file -- confirm against the
 * full file. */
uint64_t event; /* TraceEventID */
63
uint64_t timestamp_ns;
64
uint32_t length; /* in bytes */
65
uint32_t reserved; /* unused */
70
uint64_t header_event_id; /* HEADER_EVENT_ID */
71
uint64_t header_magic; /* HEADER_MAGIC */
72
uint64_t header_version; /* HEADER_VERSION */
76
/* Ring-buffer byte copy helpers; both wrap at TRACE_BUF_LEN. */
static void read_from_buffer(unsigned int idx, void *dataptr, size_t size);
77
/* Returns the index one past the last byte written (next write offset). */
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size);
79
static void clear_buffer_range(unsigned int idx, size_t len)
83
if (idx >= TRACE_BUF_LEN) {
84
idx = idx % TRACE_BUF_LEN;
71
91
* Read a trace record from the trace buffer
76
96
* Returns false if the record is not valid.
78
static bool get_trace_record(unsigned int idx, TraceRecord *record)
98
static bool get_trace_record(unsigned int idx, TraceRecord **recordptr)
80
if (!(trace_buf[idx].event & TRACE_RECORD_VALID)) {
100
uint64_t event_flag = 0;
102
/* read the event flag to see if its a valid record */
103
read_from_buffer(idx, &record, sizeof(event_flag));
105
if (!(record.event & TRACE_RECORD_VALID)) {
84
__sync_synchronize(); /* read memory barrier before accessing record */
86
*record = trace_buf[idx];
87
record->event &= ~TRACE_RECORD_VALID;
109
smp_rmb(); /* read memory barrier before accessing record */
110
/* read the record header to know record length */
111
read_from_buffer(idx, &record, sizeof(TraceRecord));
112
*recordptr = malloc(record.length); /* dont use g_malloc, can deadlock when traced */
113
/* make a copy of record to avoid being overwritten */
114
read_from_buffer(idx, *recordptr, record.length);
115
smp_rmb(); /* memory barrier before clearing valid flag */
116
(*recordptr)->event &= ~TRACE_RECORD_VALID;
117
/* clear the trace buffer range for consumed record otherwise any byte
118
* with its MSB set may be considered as a valid event id when the writer
119
* thread crosses this range of buffer again.
121
clear_buffer_range(idx, record.length);
121
155
/*
 * Writeout thread: drains completed trace records from the ring buffer
 * to the trace file.  Runs forever; blocks until records are available.
 *
 * @opaque  unused thread argument (glib thread signature)
 */
static gpointer writeout_thread(gpointer opaque)
{
    TraceRecord *recordptr;
    union {
        TraceRecord rec;
        uint8_t bytes[sizeof(TraceRecord) + sizeof(uint64_t)];
    } dropped;
    unsigned int idx = 0;
    uint64_t dropped_count;
    size_t unused __attribute__ ((unused));

    for (;;) {
        wait_for_trace_records_available();

        /* If the writer overran the reader, emit a synthetic record whose
         * single argument is the number of dropped events. */
        if (dropped_events) {
            dropped.rec.event = DROPPED_EVENT_ID;
            dropped.rec.timestamp_ns = get_clock();
            dropped.rec.length = sizeof(TraceRecord) + sizeof(dropped_events);
            dropped.rec.reserved = 0;
            /* Atomically claim-and-reset the drop counter; retry if the
             * writer raced us and bumped it in between. */
            do {
                dropped_count = dropped_events;
            } while (!g_atomic_int_compare_and_exchange((gint *)&dropped_events,
                                                        dropped_count, 0));
            memcpy(dropped.rec.arguments, &dropped_count, sizeof(uint64_t));
            unused = fwrite(&dropped.rec, dropped.rec.length, 1, trace_fp);
        }

        /* Drain every currently-valid record, advancing the shared
         * writeout_idx so the writer can reuse the space. */
        while (get_trace_record(idx, &recordptr)) {
            unused = fwrite(recordptr, recordptr->length, 1, trace_fp);
            writeout_idx += recordptr->length;
            free(recordptr); /* dont use g_free, can deadlock when traced */
            idx = writeout_idx % TRACE_BUF_LEN;
        }

        fflush(trace_fp);
    }
    return NULL;
}
153
/*
 * Write one event into the trace ring buffer (older fixed-arity API,
 * six uint64_t arguments stored directly in the record).
 *
 * NOTE(review): this fragment conflicts with the variable-length record
 * layout used elsewhere in this chunk, and most of its body (locals,
 * remaining struct-initializer fields, closing braces) is missing --
 * only comments are added here; confirm against the full file.
 */
static void trace(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3,
154
uint64_t x4, uint64_t x5, uint64_t x6)
159
/* Skip events whose id is not currently enabled. */
if (!trace_list[event].state) {
163
timestamp = get_clock();
165
/* Atomically claim the next slot; index wraps around the ring. */
idx = g_atomic_int_exchange_and_add((gint *)&trace_idx, 1) % TRACE_BUF_LEN;
166
trace_buf[idx] = (TraceRecord){
168
.timestamp_ns = timestamp,
176
__sync_synchronize(); /* write barrier before marking as valid */
177
trace_buf[idx].event |= TRACE_RECORD_VALID;
179
/* Periodic flush check -- the body under this if is missing here. */
if ((idx + 1) % TRACE_BUF_FLUSH_THRESHOLD == 0) {
197
/*
 * Append a 64-bit argument to an in-progress trace record.
 *
 * @rec  record handle returned by trace_record_start()
 * @val  value written at the record's current write offset
 */
void trace_record_write_u64(TraceBufferRecord *rec, uint64_t val)
{
    rec->rec_off = write_to_buffer(rec->rec_off, &val, sizeof(uint64_t));
}
202
/*
 * Append a length-prefixed string argument to an in-progress record.
 *
 * @rec   record handle returned by trace_record_start()
 * @s     string bytes to copy (stored without a NUL terminator)
 * @slen  number of bytes of @s to write
 */
void trace_record_write_str(TraceBufferRecord *rec, const char *s, uint32_t slen)
{
    /* Write string length first */
    rec->rec_off = write_to_buffer(rec->rec_off, &slen, sizeof(slen));
    /* Write actual string now */
    rec->rec_off = write_to_buffer(rec->rec_off, (void *)s, slen);
}
210
/*
 * Reserve space in the trace buffer for a new record and write its
 * header.  The record is NOT visible to the writeout thread until
 * trace_record_finish() sets the valid bit.
 *
 * @rec       filled in with the record's buffer position and write offset
 * @event     event id stored in the record header
 * @datasize  number of argument bytes that will follow the header
 *
 * Returns 0 on success, -ENOSPC if the buffer is full (event dropped).
 *
 * (Reconstructed: the original fragment had "&times;tamp_ns", an
 * HTML-entity corruption of "&timestamp_ns".)
 */
int trace_record_start(TraceBufferRecord *rec, TraceEventID event, size_t datasize)
{
    unsigned int idx, rec_off, old_idx, new_idx;
    uint32_t rec_len = sizeof(TraceRecord) + datasize;
    uint64_t timestamp_ns = get_clock();

    /* Atomically reserve rec_len bytes: retry until the compare-and-swap
     * on the shared write index succeeds. */
    do {
        old_idx = trace_idx;
        smp_rmb();
        new_idx = old_idx + rec_len;

        if (new_idx - writeout_idx > TRACE_BUF_LEN) {
            /* Trace Buffer Full, Event dropped ! */
            g_atomic_int_inc((gint *)&dropped_events);
            return -ENOSPC;
        }
    } while (!g_atomic_int_compare_and_exchange((gint *)&trace_idx,
                                                old_idx, new_idx));

    idx = old_idx % TRACE_BUF_LEN;
    /* To check later if threshold crossed */
    rec->next_tbuf_idx = new_idx % TRACE_BUF_LEN;

    /* Write the header in place; the valid bit in 'event' stays clear
     * until trace_record_finish(). */
    rec_off = idx;
    rec_off = write_to_buffer(rec_off, (uint8_t *)&event, sizeof(event));
    rec_off = write_to_buffer(rec_off, (uint8_t *)&timestamp_ns, sizeof(timestamp_ns));
    rec_off = write_to_buffer(rec_off, (uint8_t *)&rec_len, sizeof(rec_len));

    rec->tbuf_idx = idx;
    rec->rec_off = (idx + sizeof(TraceRecord)) % TRACE_BUF_LEN;
    return 0;
}
247
static void read_from_buffer(unsigned int idx, void *dataptr, size_t size)
249
uint8_t *data_ptr = dataptr;
252
if (idx >= TRACE_BUF_LEN) {
253
idx = idx % TRACE_BUF_LEN;
255
data_ptr[x++] = trace_buf[idx++];
259
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size)
261
uint8_t *data_ptr = dataptr;
264
if (idx >= TRACE_BUF_LEN) {
265
idx = idx % TRACE_BUF_LEN;
267
trace_buf[idx++] = data_ptr[x++];
269
return idx; /* most callers wants to know where to write next */
272
/*
 * Publish a completed record: set its valid bit so the writeout thread
 * may consume it, then kick a flush if the buffer is filling up.
 *
 * @rec  record handle returned by trace_record_start()
 */
void trace_record_finish(TraceBufferRecord *rec)
{
    uint8_t temp_rec[sizeof(TraceRecord)];
    TraceRecord *record = (TraceRecord *) temp_rec;

    /* Re-read the header, set the valid bit after a write barrier, and
     * store it back so the reader never sees a half-written record. */
    read_from_buffer(rec->tbuf_idx, temp_rec, sizeof(TraceRecord));
    smp_wmb(); /* write barrier before marking as valid */
    record->event |= TRACE_RECORD_VALID;
    write_to_buffer(rec->tbuf_idx, temp_rec, sizeof(TraceRecord));

    if ((trace_idx - writeout_idx) > TRACE_BUF_FLUSH_THRESHOLD) {
        flush_trace_file(false);
    }
}
184
/* Fixed-arity convenience wrappers: forward to trace() with unused
 * argument slots set to 0. */
void trace0(TraceEventID event)
{
    trace(event, 0, 0, 0, 0, 0, 0);
}

void trace1(TraceEventID event, uint64_t x1)
{
    trace(event, x1, 0, 0, 0, 0, 0);
}

void trace2(TraceEventID event, uint64_t x1, uint64_t x2)
{
    trace(event, x1, x2, 0, 0, 0, 0);
}

void trace3(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3)
{
    trace(event, x1, x2, x3, 0, 0, 0);
}

void trace4(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4)
{
    trace(event, x1, x2, x3, x4, 0, 0);
}

void trace5(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5)
{
    trace(event, x1, x2, x3, x4, x5, 0);
}

void trace6(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5, uint64_t x6)
{
    trace(event, x1, x2, x3, x4, x5, x6);
}
219
286
void st_set_trace_file_enabled(bool enable)
221
288
if (enable == !!trace_fp) {