1
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
6
#ifndef BASE_DEBUG_TRACE_EVENT_IMPL_H_
7
#define BASE_DEBUG_TRACE_EVENT_IMPL_H_
13
#include "base/atomicops.h"
14
#include "base/callback.h"
15
#include "base/containers/hash_tables.h"
16
#include "base/gtest_prod_util.h"
17
#include "base/memory/ref_counted_memory.h"
18
#include "base/memory/scoped_vector.h"
19
#include "base/observer_list.h"
20
#include "base/strings/string_util.h"
21
#include "base/synchronization/condition_variable.h"
22
#include "base/synchronization/lock.h"
23
#include "base/threading/thread.h"
24
#include "base/threading/thread_local.h"
25
#include "base/timer/timer.h"
27
// Older style trace macros with explicit id and extra data
// Only these macros result in publishing data to ETW as currently implemented.
// NOTE: the extraction artifact placed stray integer lines between the
// backslash continuations, which would have been spliced into the macro body;
// they are removed here so the macro expands to the intended single call.
#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_BEGIN, \
        name, reinterpret_cast<const void*>(id), extra)
34
// Emits an ETW "end" event paired with TRACE_EVENT_BEGIN_ETW; |id| is cast to
// a pointer-sized value, |extra| is free-form extra data.
#define TRACE_EVENT_END_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_END, \
        name, reinterpret_cast<const void*>(id), extra)
39
// Emits a single instantaneous ETW event (no begin/end pairing).
#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
    base::debug::TraceLog::AddTraceEventEtw( \
        TRACE_EVENT_PHASE_INSTANT, \
        name, reinterpret_cast<const void*>(id), extra)
44
// Forward declaration so TraceLog can befriend its singleton traits without
// pulling in base/memory/singleton.h.
template <typename Type>
struct DefaultSingletonTraits;
47
#if defined(COMPILER_GCC)
namespace BASE_HASH_NAMESPACE {

// GCC's hash containers have no built-in hash for raw pointers of this type;
// the pointer value itself serves as the hash. Needed for the
// hash_set<MessageLoop*> member of TraceLog below.
template <>
struct hash<base::MessageLoop*> {
  std::size_t operator()(base::MessageLoop* value) const {
    return reinterpret_cast<std::size_t>(value);
  }
};

}  // BASE_HASH_NAMESPACE
#endif  // COMPILER_GCC
65
// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
66
// class must implement this interface.
67
class ConvertableToTraceFormat : public RefCounted<ConvertableToTraceFormat> {
69
// Append the class info to the provided |out| string. The appended
70
// data must be a valid JSON object. Strings must be properly quoted, and
71
// escaped. There is no processing applied to the content after it is
73
virtual void AppendAsTraceFormat(std::string* out) const = 0;
76
virtual ~ConvertableToTraceFormat() {}
79
friend class RefCounted<ConvertableToTraceFormat>;
82
struct TraceEventHandle {
88
// Upper bound on the number of arguments one trace event can carry;
// TraceEvent sizes its arg_names_/arg_values_/arg_types_ arrays with it.
const int kTraceMaxNumArgs = 2;
90
class BASE_EXPORT TraceEvent {
94
unsigned long long as_uint;
97
const void* as_pointer;
98
const char* as_string;
104
// We don't need to copy TraceEvent except when TraceEventBuffer is cloned.
105
// Use explicit copy method to avoid accidentally misuse of copy.
106
void CopyFrom(const TraceEvent& other);
111
TimeTicks thread_timestamp,
113
const unsigned char* category_group_enabled,
115
unsigned long long id,
117
const char** arg_names,
118
const unsigned char* arg_types,
119
const unsigned long long* arg_values,
120
const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
121
unsigned char flags);
125
void UpdateDuration(const TimeTicks& now, const TimeTicks& thread_now);
127
// Serialize event data to JSON
128
void AppendAsJSON(std::string* out) const;
129
void AppendPrettyPrinted(std::ostringstream* out) const;
131
static void AppendValueAsJSON(unsigned char type,
135
TimeTicks timestamp() const { return timestamp_; }
136
TimeTicks thread_timestamp() const { return thread_timestamp_; }
137
char phase() const { return phase_; }
138
int thread_id() const { return thread_id_; }
139
TimeDelta duration() const { return duration_; }
140
TimeDelta thread_duration() const { return thread_duration_; }
141
unsigned long long id() const { return id_; }
142
unsigned char flags() const { return flags_; }
144
// Exposed for unittesting:
146
const base::RefCountedString* parameter_copy_storage() const {
147
return parameter_copy_storage_.get();
150
const unsigned char* category_group_enabled() const {
151
return category_group_enabled_;
154
const char* name() const { return name_; }
156
#if defined(OS_ANDROID)
161
// Note: these are ordered by size (largest first) for optimal packing.
162
TimeTicks timestamp_;
163
TimeTicks thread_timestamp_;
165
TimeDelta thread_duration_;
166
// id_ can be used to store phase-specific data.
167
unsigned long long id_;
168
TraceValue arg_values_[kTraceMaxNumArgs];
169
const char* arg_names_[kTraceMaxNumArgs];
170
scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
171
const unsigned char* category_group_enabled_;
173
scoped_refptr<base::RefCountedString> parameter_copy_storage_;
176
unsigned char flags_;
177
unsigned char arg_types_[kTraceMaxNumArgs];
179
DISALLOW_COPY_AND_ASSIGN(TraceEvent);
182
// TraceBufferChunk is the basic unit of TraceBuffer.
183
class BASE_EXPORT TraceBufferChunk {
185
TraceBufferChunk(uint32 seq)
190
void Reset(uint32 new_seq);
191
TraceEvent* AddTraceEvent(size_t* event_index);
192
bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }
194
uint32 seq() const { return seq_; }
195
size_t capacity() const { return kTraceBufferChunkSize; }
196
size_t size() const { return next_free_; }
198
TraceEvent* GetEventAt(size_t index) {
199
DCHECK(index < size());
200
return &chunk_[index];
202
const TraceEvent* GetEventAt(size_t index) const {
203
DCHECK(index < size());
204
return &chunk_[index];
207
scoped_ptr<TraceBufferChunk> Clone() const;
209
static const size_t kTraceBufferChunkSize = 64;
213
TraceEvent chunk_[kTraceBufferChunkSize];
217
// TraceBuffer holds the events as they are collected.
218
class BASE_EXPORT TraceBuffer {
220
virtual ~TraceBuffer() {}
222
virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t *index) = 0;
223
virtual void ReturnChunk(size_t index,
224
scoped_ptr<TraceBufferChunk> chunk) = 0;
226
virtual bool IsFull() const = 0;
227
virtual size_t Size() const = 0;
228
virtual size_t Capacity() const = 0;
229
virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;
231
// For iteration. Each TraceBuffer can only be iterated once.
232
virtual const TraceBufferChunk* NextChunk() = 0;
234
virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
237
// TraceResultBuffer collects and converts trace fragments returned by TraceLog
239
class BASE_EXPORT TraceResultBuffer {
241
typedef base::Callback<void(const std::string&)> OutputCallback;
243
// If you don't need to stream JSON chunks out efficiently, and just want to
244
// get a complete JSON string after calling Finish, use this struct to collect
245
// JSON trace output.
246
struct BASE_EXPORT SimpleOutput {
247
OutputCallback GetCallback();
248
void Append(const std::string& json_string);
250
// Do what you want with the json_output_ string after calling
251
// TraceResultBuffer::Finish.
252
std::string json_output;
256
~TraceResultBuffer();
258
// Set callback. The callback will be called during Start with the initial
259
// JSON output and during AddFragment and Finish with following JSON output
260
// chunks. The callback target must live past the last calls to
261
// TraceResultBuffer::Start/AddFragment/Finish.
262
void SetOutputCallback(const OutputCallback& json_chunk_callback);
264
// Start JSON output. This resets all internal state, so you can reuse
265
// the TraceResultBuffer by calling Start.
268
// Call AddFragment 0 or more times to add trace fragments from TraceLog.
269
void AddFragment(const std::string& trace_fragment);
271
// When all fragments have been added, call Finish to complete the JSON
276
OutputCallback output_callback_;
280
class BASE_EXPORT CategoryFilter {
282
typedef std::vector<std::string> StringList;
284
// The default category filter, used when none is provided.
285
// Allows all categories through, except if they end in the suffix 'Debug' or
287
static const char* kDefaultCategoryFilterString;
289
// |filter_string| is a comma-delimited list of category wildcards.
290
// A category can have an optional '-' prefix to make it an excluded category.
291
// All the same rules apply above, so for example, having both included and
292
// excluded categories in the same list would not be supported.
294
// Example: CategoryFilter"test_MyTest*");
295
// Example: CategoryFilter("test_MyTest*,test_OtherStuff");
296
// Example: CategoryFilter("-excluded_category1,-excluded_category2");
297
// Example: CategoryFilter("-*,webkit"); would disable everything but webkit.
298
// Example: CategoryFilter("-webkit"); would enable everything but webkit.
300
// Category filters can also be used to configure synthetic delays.
302
// Example: CategoryFilter("DELAY(gpu.SwapBuffers;16)"); would make swap
303
// buffers always take at least 16 ms.
304
// Example: CategoryFilter("DELAY(gpu.SwapBuffers;16;oneshot)"); would
305
// make swap buffers take at least 16 ms the first time it is called.
306
// Example: CategoryFilter("DELAY(gpu.SwapBuffers;16;alternating)"); would
307
// make swap buffers take at least 16 ms every other time it is
309
explicit CategoryFilter(const std::string& filter_string);
311
CategoryFilter(const CategoryFilter& cf);
315
CategoryFilter& operator=(const CategoryFilter& rhs);
317
// Writes the string representation of the CategoryFilter. This is a comma
318
// separated string, similar in nature to the one used to determine
319
// enabled/disabled category patterns, except here there is an arbitrary
320
// order, included categories go first, then excluded categories. Excluded
321
// categories are distinguished from included categories by the prefix '-'.
322
std::string ToString() const;
324
// Determines whether category group would be enabled or
325
// disabled by this category filter.
326
bool IsCategoryGroupEnabled(const char* category_group) const;
328
// Return a list of the synthetic delays specified in this category filter.
329
const StringList& GetSyntheticDelayValues() const;
331
// Merges nested_filter with the current CategoryFilter
332
void Merge(const CategoryFilter& nested_filter);
334
// Clears both included/excluded pattern lists. This would be equivalent to
335
// creating a CategoryFilter with an empty string, through the constructor.
336
// i.e: CategoryFilter("").
338
// When using an empty filter, all categories are considered included as we
339
// are not excluding anything.
343
FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, CategoryFilter);
345
static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
346
const std::string& str);
348
void Initialize(const std::string& filter_string);
349
void WriteString(const StringList& values,
351
bool included) const;
352
void WriteString(const StringList& delays, std::string* out) const;
353
bool HasIncludedPatterns() const;
355
bool DoesCategoryGroupContainCategory(const char* category_group,
356
const char* category) const;
358
StringList included_;
359
StringList disabled_;
360
StringList excluded_;
364
// Forward declaration; TraceLog below holds one via sampling_thread_.
class TraceSamplingThread;
366
class BASE_EXPORT TraceLog {
374
// Options determines how the trace buffer stores data.
376
// Record until the trace buffer is full.
377
RECORD_UNTIL_FULL = 1 << 0,
379
// Record until the user ends the trace. The trace buffer is a fixed size
380
// and we use it as a ring buffer during recording.
381
RECORD_CONTINUOUSLY = 1 << 1,
383
// Enable the sampling profiler in the recording mode.
384
ENABLE_SAMPLING = 1 << 2,
386
// Echo to console. Events are discarded.
387
ECHO_TO_CONSOLE = 1 << 3,
390
// The pointer returned from GetCategoryGroupEnabledInternal() points to a
391
// value with zero or more of the following bits. Used in this class only.
392
// The TRACE_EVENT macros should only use the value as a bool.
393
// These values must be in sync with macro values in TraceEvent.h in Blink.
394
enum CategoryGroupEnabledFlags {
395
// Category group enabled for the recording mode.
396
ENABLED_FOR_RECORDING = 1 << 0,
397
// Category group enabled for the monitoring mode.
398
ENABLED_FOR_MONITORING = 1 << 1,
399
// Category group enabled by SetEventCallbackEnabled().
400
ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
403
static TraceLog* GetInstance();
405
// Get set of known category groups. This can change as new code paths are
406
// reached. The known category groups are inserted into |category_groups|.
407
void GetKnownCategoryGroups(std::vector<std::string>* category_groups);
409
// Retrieves a copy (for thread-safety) of the current CategoryFilter.
410
CategoryFilter GetCurrentCategoryFilter();
412
Options trace_options() const {
413
return static_cast<Options>(subtle::NoBarrier_Load(&trace_options_));
416
// Enables normal tracing (recording trace events in the trace buffer).
417
// See CategoryFilter comments for details on how to control what categories
418
// will be traced. If tracing has already been enabled, |category_filter| will
419
// be merged into the current category filter.
420
void SetEnabled(const CategoryFilter& category_filter,
421
Mode mode, Options options);
423
// Disables normal tracing for all categories.
426
bool IsEnabled() { return mode_ != DISABLED; }
428
// The number of times we have begun recording traces. If tracing is off,
429
// returns -1. If tracing is on, then it returns the number of times we have
430
// recorded a trace. By watching for this number to increment, you can
431
// passively discover when a new trace has begun. This is then used to
432
// implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
433
int GetNumTracesRecorded();
435
#if defined(OS_ANDROID)
438
void AddClockSyncMetadataEvent();
441
// Enabled state listeners give a callback when tracing is enabled or
442
// disabled. This can be used to tie into other library's tracing systems
444
class EnabledStateObserver {
446
// Called just after the tracing system becomes enabled, outside of the
447
// |lock_|. TraceLog::IsEnabled() is true at this point.
448
virtual void OnTraceLogEnabled() = 0;
450
// Called just after the tracing system disables, outside of the |lock_|.
451
// TraceLog::IsEnabled() is false at this point.
452
virtual void OnTraceLogDisabled() = 0;
454
void AddEnabledStateObserver(EnabledStateObserver* listener);
455
void RemoveEnabledStateObserver(EnabledStateObserver* listener);
456
bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
458
float GetBufferPercentFull() const;
459
bool BufferIsFull() const;
461
// Not using base::Callback because of its limited by 7 parameters.
462
// Also, using primitive type allows directly passing callback from WebCore.
463
// WARNING: It is possible for the previously set callback to be called
464
// after a call to SetEventCallbackEnabled() that replaces or a call to
465
// SetEventCallbackDisabled() that disables the callback.
466
// This callback may be invoked on any thread.
467
// For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
468
// of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
470
typedef void (*EventCallback)(TimeTicks timestamp,
472
const unsigned char* category_group_enabled,
474
unsigned long long id,
476
const char* const arg_names[],
477
const unsigned char arg_types[],
478
const unsigned long long arg_values[],
479
unsigned char flags);
481
// Enable tracing for EventCallback.
482
void SetEventCallbackEnabled(const CategoryFilter& category_filter,
484
void SetEventCallbackDisabled();
486
// Flush all collected events to the given output callback. The callback will
487
// be called one or more times either synchronously or asynchronously from
488
// the current thread with IPC-bite-size chunks. The string format is
489
// undefined. Use TraceResultBuffer to convert one or more trace strings to
490
// JSON. The callback can be null if the caller doesn't want any data.
491
// Due to the implementation of thread-local buffers, flush can't be
492
// done when tracing is enabled. If called when tracing is enabled, the
493
// callback will be called directly with (empty_string, false) to indicate
494
// the end of this unsuccessful flush.
495
typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
496
bool has_more_events)> OutputCallback;
497
void Flush(const OutputCallback& cb);
498
void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
500
// Called by TRACE_EVENT* macros, don't call this directly.
501
// The name parameter is a category group for example:
502
// TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
503
static const unsigned char* GetCategoryGroupEnabled(const char* name);
504
static const char* GetCategoryGroupName(
505
const unsigned char* category_group_enabled);
507
// Called by TRACE_EVENT* macros, don't call this directly.
508
// If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
509
// into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
510
TraceEventHandle AddTraceEvent(
512
const unsigned char* category_group_enabled,
514
unsigned long long id,
516
const char** arg_names,
517
const unsigned char* arg_types,
518
const unsigned long long* arg_values,
519
const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
520
unsigned char flags);
521
TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
523
const unsigned char* category_group_enabled,
525
unsigned long long id,
527
const TimeTicks& timestamp,
529
const char** arg_names,
530
const unsigned char* arg_types,
531
const unsigned long long* arg_values,
532
const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
533
unsigned char flags);
534
static void AddTraceEventEtw(char phase,
535
const char* category_group,
538
static void AddTraceEventEtw(char phase,
539
const char* category_group,
541
const std::string& extra);
543
void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
545
TraceEventHandle handle);
547
// For every matching event, the callback will be called.
548
typedef base::Callback<void()> WatchEventCallback;
549
void SetWatchEvent(const std::string& category_name,
550
const std::string& event_name,
551
const WatchEventCallback& callback);
552
// Cancel the watch event. If tracing is enabled, this may race with the
553
// watch event notification firing.
554
void CancelWatchEvent();
556
int process_id() const { return process_id_; }
558
// Exposed for unittesting:
560
void WaitSamplingEventForTesting();
562
// Allows deleting our singleton instance.
563
static void DeleteForTesting();
565
// Allow tests to inspect TraceEvents.
566
size_t GetEventsSize() const { return logged_events_->Size(); }
567
TraceEvent* GetEventByHandle(TraceEventHandle handle);
569
void SetProcessID(int process_id);
571
// Process sort indices, if set, override the order of a process will appear
572
// relative to other processes in the trace viewer. Processes are sorted first
573
// on their sort index, ascending, then by their name, and then tid.
574
void SetProcessSortIndex(int sort_index);
576
// Sets the name of the process.
577
void SetProcessName(const std::string& process_name);
579
// Processes can have labels in addition to their names. Use labels, for
580
// instance, to list out the web page titles that a process is handling.
581
void UpdateProcessLabel(int label_id, const std::string& current_label);
582
void RemoveProcessLabel(int label_id);
584
// Thread sort indices, if set, override the order of a thread will appear
585
// within its process in the trace viewer. Threads are sorted first on their
586
// sort index, ascending, then by their name, and then tid.
587
void SetThreadSortIndex(PlatformThreadId , int sort_index);
589
// Allow setting an offset between the current TimeTicks time and the time
590
// that should be reported.
591
void SetTimeOffset(TimeDelta offset);
593
size_t GetObserverCountForTest() const;
595
// Call this method if the current thread may block the message loop to
596
// prevent the thread from using the thread-local buffer because the thread
597
// may not handle the flush request in time causing lost of unflushed events.
598
void SetCurrentThreadBlocksMessageLoop();
601
FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
602
TraceBufferRingBufferGetReturnChunk);
603
FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
604
TraceBufferRingBufferHalfIteration);
605
FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
606
TraceBufferRingBufferFullIteration);
608
// This allows constructor and destructor to be private and usable only
609
// by the Singleton class.
610
friend struct DefaultSingletonTraits<TraceLog>;
612
// Enable/disable each category group based on the current mode_,
613
// category_filter_, event_callback_ and event_callback_category_filter_.
614
// Enable the category group in the enabled mode if category_filter_ matches
615
// the category group, or event_callback_ is not null and
616
// event_callback_category_filter_ matches the category group.
617
void UpdateCategoryGroupEnabledFlags();
618
void UpdateCategoryGroupEnabledFlag(int category_index);
620
// Configure synthetic delays based on the values set in the current
622
void UpdateSyntheticDelaysFromCategoryFilter();
624
class ThreadLocalEventBuffer;
625
class OptionalAutoLock;
629
const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
630
void AddMetadataEventsWhileLocked();
632
TraceBuffer* trace_buffer() const { return logged_events_.get(); }
633
TraceBuffer* CreateTraceBuffer();
635
std::string EventToConsoleMessage(unsigned char phase,
636
const TimeTicks& timestamp,
637
TraceEvent* trace_event);
639
TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
640
bool check_buffer_is_full);
641
void CheckIfBufferIsFullWhileLocked();
642
void SetDisabledWhileLocked();
644
TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
645
OptionalAutoLock* lock);
647
// |generation| is used in the following callbacks to check if the callback
648
// is called for the flush of the current |logged_events_|.
649
void FlushCurrentThread(int generation);
650
void ConvertTraceEventsToTraceFormat(scoped_ptr<TraceBuffer> logged_events,
651
const TraceLog::OutputCallback& flush_output_callback);
652
void FinishFlush(int generation);
653
void OnFlushTimeout(int generation);
655
int generation() const {
656
return static_cast<int>(subtle::NoBarrier_Load(&generation_));
658
bool CheckGeneration(int generation) const {
659
return generation == this->generation();
661
void UseNextTraceBuffer();
663
TimeTicks OffsetNow() const {
664
return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime());
666
TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const {
667
return timestamp - time_offset_;
670
// This lock protects TraceLog member accesses (except for members protected
671
// by thread_info_lock_) from arbitrary threads.
673
// This lock protects accesses to thread_names_, thread_event_start_times_
674
// and thread_colors_.
675
Lock thread_info_lock_;
678
int num_traces_recorded_;
679
scoped_ptr<TraceBuffer> logged_events_;
680
subtle::AtomicWord /* EventCallback */ event_callback_;
681
bool dispatching_to_observer_list_;
682
std::vector<EnabledStateObserver*> enabled_state_observer_list_;
684
std::string process_name_;
685
base::hash_map<int, std::string> process_labels_;
686
int process_sort_index_;
687
base::hash_map<int, int> thread_sort_indices_;
688
base::hash_map<int, std::string> thread_names_;
690
// The following two maps are used only when ECHO_TO_CONSOLE.
691
base::hash_map<int, std::stack<TimeTicks> > thread_event_start_times_;
692
base::hash_map<std::string, int> thread_colors_;
694
// XORed with TraceID to make it unlikely to collide with other processes.
695
unsigned long long process_id_hash_;
699
TimeDelta time_offset_;
701
// Allow tests to wake up when certain events occur.
702
WatchEventCallback watch_event_callback_;
703
subtle::AtomicWord /* const unsigned char* */ watch_category_;
704
std::string watch_event_name_;
706
subtle::AtomicWord /* Options */ trace_options_;
708
// Sampling thread handles.
709
scoped_ptr<TraceSamplingThread> sampling_thread_;
710
PlatformThreadHandle sampling_thread_handle_;
712
CategoryFilter category_filter_;
713
CategoryFilter event_callback_category_filter_;
715
ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
716
ThreadLocalBoolean thread_blocks_message_loop_;
717
ThreadLocalBoolean thread_is_in_trace_event_;
719
// Contains the message loops of threads that have had at least one event
720
// added into the local event buffer. Not using MessageLoopProxy because we
721
// need to know the life time of the message loops.
722
hash_set<MessageLoop*> thread_message_loops_;
724
// For events which can't be added into the thread local buffer, e.g. events
725
// from threads without a message loop.
726
scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
727
size_t thread_shared_chunk_index_;
729
// Set when asynchronous Flush is in progress.
730
OutputCallback flush_output_callback_;
731
scoped_refptr<MessageLoopProxy> flush_message_loop_proxy_;
732
subtle::AtomicWord generation_;
734
DISALLOW_COPY_AND_ASSIGN(TraceLog);
740
#endif // BASE_DEBUG_TRACE_EVENT_IMPL_H_