~yolanda.robla/ubuntu/trusty/nodejs/add_distribution


Viewing changes to deps/v8/src/runtime-profiler.cc

  • Committer: Package Import Robot
  • Author(s): Jérémy Lal
  • Date: 2013-08-14 00:16:46 UTC
  • mfrom: (7.1.40 sid)
  • Revision ID: package-import@ubuntu.com-20130814001646-bzlysfh8sd6mukbo
Tags: 0.10.15~dfsg1-4
* Update 2005 patch, adding a handful of tests that can fail on
  slow platforms.
* Add 1004 patch to fix test failures when writing NaN to buffer
  on mipsel.

--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,7 +34,9 @@
 #include "compilation-cache.h"
 #include "deoptimizer.h"
 #include "execution.h"
+#include "full-codegen.h"
 #include "global-handles.h"
+#include "isolate-inl.h"
 #include "mark-compact.h"
 #include "platform.h"
 #include "scopeinfo.h"
@@ -45,6 +47,8 @@
 
 // Optimization sampler constants.
 static const int kSamplerFrameCount = 2;
+
+// Constants for statistical profiler.
 static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
 
 static const int kSamplerTicksBetweenThresholdAdjustment = 32;
@@ -57,14 +61,38 @@
 
 static const int kSizeLimit = 1500;
 
+// Constants for counter based profiler.
+
+// Number of times a function has to be seen on the stack before it is
+// optimized.
+static const int kProfilerTicksBeforeOptimization = 2;
+// If the function optimization was disabled due to high deoptimization count,
+// but the function is hot and has been seen on the stack this number of times,
+// then we try to reenable optimization for this function.
+static const int kProfilerTicksBeforeReenablingOptimization = 250;
+// If a function does not have enough type info (according to
+// FLAG_type_info_threshold), but has seen a huge number of ticks,
+// optimize it as it is.
+static const int kTicksWhenNotEnoughTypeInfo = 100;
+// We only have one byte to store the number of ticks.
+STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
+STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
+STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
+
+
+// Maximum size in bytes of generated code for a function to be optimized
+// the very first time it is seen on the stack.
+static const int kMaxSizeEarlyOpt =
+    5 * FullCodeGenerator::kBackEdgeDistanceUnit;
+
 
 Atomic32 RuntimeProfiler::state_ = 0;
-// TODO(isolates): Create the semaphore lazily and clean it up when no
-// longer required.
-Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
+
+// TODO(isolates): Clean up the semaphore when it is no longer required.
+static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
 
 #ifdef DEBUG
-bool RuntimeProfiler::has_been_globally_setup_ = false;
+bool RuntimeProfiler::has_been_globally_set_up_ = false;
 #endif
 bool RuntimeProfiler::enabled_ = false;
 
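The constants above drive the new counter-based (tick) profiler. As a reading aid only, here is a minimal standalone C++ sketch, not part of the patch, of why the STATIC_ASSERTs exist: per the comment above, the tick count is stored in a single byte, so every threshold compared against it must stay below 256. The names mirror the constants above; the main() driver and helper are purely illustrative.

// Standalone sketch (not V8 code): thresholds compared against a one-byte
// tick counter must stay below 256, which the asserts below guarantee.
#include <cstdint>
#include <cstdio>

namespace {

const int kProfilerTicksBeforeOptimization = 2;
const int kProfilerTicksBeforeReenablingOptimization = 250;
const int kTicksWhenNotEnoughTypeInfo = 100;

// Equivalent of the patch's STATIC_ASSERTs, written as C++11 static_assert.
static_assert(kProfilerTicksBeforeOptimization < 256, "must fit in one byte");
static_assert(kProfilerTicksBeforeReenablingOptimization < 256,
              "must fit in one byte");
static_assert(kTicksWhenNotEnoughTypeInfo < 256, "must fit in one byte");

uint8_t profiler_ticks = 0;  // one-byte counter, as described in the comment

bool HotEnoughToConsider() {
  return profiler_ticks >= kProfilerTicksBeforeOptimization;
}

}  // namespace

int main() {
  for (int tick = 0; tick < 4; ++tick) {
    std::printf("tick %d -> consider optimizing: %s\n",
                tick, HotEnoughToConsider() ? "yes" : "no");
    ++profiler_ticks;
  }
  return 0;
}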
@@ -75,39 +103,70 @@
       sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
       sampler_ticks_until_threshold_adjustment_(
           kSamplerTicksBetweenThresholdAdjustment),
-      sampler_window_position_(0) {
+      sampler_window_position_(0),
+      any_ic_changed_(false),
+      code_generated_(false) {
   ClearSampleBuffer();
 }
 
 
-void RuntimeProfiler::GlobalSetup() {
-  ASSERT(!has_been_globally_setup_);
+void RuntimeProfiler::GlobalSetUp() {
+  ASSERT(!has_been_globally_set_up_);
   enabled_ = V8::UseCrankshaft() && FLAG_opt;
 #ifdef DEBUG
-  has_been_globally_setup_ = true;
+  has_been_globally_set_up_ = true;
 #endif
 }
 
 
-void RuntimeProfiler::Optimize(JSFunction* function) {
+static void GetICCounts(JSFunction* function,
+                        int* ic_with_type_info_count,
+                        int* ic_total_count,
+                        int* percentage) {
+  *ic_total_count = 0;
+  *ic_with_type_info_count = 0;
+  Object* raw_info =
+      function->shared()->code()->type_feedback_info();
+  if (raw_info->IsTypeFeedbackInfo()) {
+    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
+    *ic_with_type_info_count = info->ic_with_type_info_count();
+    *ic_total_count = info->ic_total_count();
+  }
+  *percentage = *ic_total_count > 0
+      ? 100 * *ic_with_type_info_count / *ic_total_count
+      : 100;
+}
+
+
+void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
   ASSERT(function->IsOptimizable());
   if (FLAG_trace_opt) {
     PrintF("[marking ");
     function->PrintName();
     PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
-    PrintF(" for recompilation");
+    PrintF(" for recompilation, reason: %s", reason);
+    if (FLAG_type_info_threshold > 0) {
+      int typeinfo, total, percentage;
+      GetICCounts(function, &typeinfo, &total, &percentage);
+      PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
+    }
    PrintF("]\n");
   }
 
-  // The next call to the function will trigger optimization.
-  function->MarkForLazyRecompilation();
+  if (FLAG_parallel_recompilation) {
+    function->MarkForParallelRecompilation();
+  } else {
+    // The next call to the function will trigger optimization.
+    function->MarkForLazyRecompilation();
+  }
 }
 
 
 void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
   // See AlwaysFullCompiler (in compiler.cc) comment on why we need
   // Debug::has_break_points().
-  ASSERT(function->IsMarkedForLazyRecompilation());
+  ASSERT(function->IsMarkedForLazyRecompilation() ||
+         function->IsMarkedForParallelRecompilation());
   if (!FLAG_use_osr ||
       isolate_->DebuggerHasBreakPoints() ||
       function->IsBuiltin()) {
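The new GetICCounts() helper above reports how much of a function's inline-cache (IC) feedback already carries type information, as an integer percentage, with "no ICs at all" counted as fully typed. A standalone sketch of that arithmetic with assumed sample numbers, not V8 code:

// Standalone sketch of the percentage computed by GetICCounts() above:
// integer division, with an empty IC set treated as 100% typed.
#include <cstdio>

int TypeInfoPercentage(int ic_with_type_info_count, int ic_total_count) {
  return ic_total_count > 0
      ? 100 * ic_with_type_info_count / ic_total_count
      : 100;
}

int main() {
  std::printf("%d%%\n", TypeInfoPercentage(7, 23));  // 7 of 23 ICs -> 30%
  std::printf("%d%%\n", TypeInfoPercentage(0, 0));   // no ICs -> 100%
  return 0;
}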
@@ -134,15 +193,22 @@
 
   // Get the stack check stub code object to match against.  We aren't
   // prepared to generate it, but we don't expect to have to.
-  StackCheckStub check_stub;
-  Object* check_code;
-  MaybeObject* maybe_check_code = check_stub.TryGetCode();
-  if (maybe_check_code->ToObject(&check_code)) {
+  bool found_code = false;
+  Code* stack_check_code = NULL;
+  if (FLAG_count_based_interrupts) {
+    InterruptStub interrupt_stub;
+    found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
+  } else  // NOLINT
+  {  // NOLINT
+    StackCheckStub check_stub;
+    found_code = check_stub.FindCodeInCache(&stack_check_code);
+  }
+  if (found_code) {
     Code* replacement_code =
         isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
     Code* unoptimized_code = shared->code();
     Deoptimizer::PatchStackCheckCode(unoptimized_code,
-                                     Code::cast(check_code),
+                                     stack_check_code,
                                      replacement_code);
   }
 }
@@ -159,7 +225,10 @@
   for (int i = 0; i < kSamplerWindowSize; i++) {
     Object* sample = sampler_window_[i];
     if (sample != NULL) {
-      if (function == sample) {
+      bool fits = FLAG_lookup_sample_by_shared
+          ? (function->shared() == JSFunction::cast(sample)->shared())
+          : (function == JSFunction::cast(sample));
+      if (fits) {
         weight += sampler_window_weight_[i];
       }
     }
@@ -186,68 +255,138 @@
   JSFunction* samples[kSamplerFrameCount];
   int sample_count = 0;
   int frame_count = 0;
+  int frame_count_limit = FLAG_watch_ic_patching ? FLAG_frame_count
+                                                 : kSamplerFrameCount;
   for (JavaScriptFrameIterator it(isolate_);
-       frame_count++ < kSamplerFrameCount && !it.done();
+       frame_count++ < frame_count_limit && !it.done();
       it.Advance()) {
     JavaScriptFrame* frame = it.frame();
     JSFunction* function = JSFunction::cast(frame->function());
 
-    // Adjust threshold each time we have processed
-    // a certain number of ticks.
-    if (sampler_ticks_until_threshold_adjustment_ > 0) {
-      sampler_ticks_until_threshold_adjustment_--;
-      if (sampler_ticks_until_threshold_adjustment_ <= 0) {
-        // If the threshold is not already at the minimum
-        // modify and reset the ticks until next adjustment.
-        if (sampler_threshold_ > kSamplerThresholdMin) {
-          sampler_threshold_ -= kSamplerThresholdDelta;
-          sampler_ticks_until_threshold_adjustment_ =
-              kSamplerTicksBetweenThresholdAdjustment;
+    if (!FLAG_watch_ic_patching) {
+      // Adjust threshold each time we have processed
+      // a certain number of ticks.
+      if (sampler_ticks_until_threshold_adjustment_ > 0) {
+        sampler_ticks_until_threshold_adjustment_--;
+        if (sampler_ticks_until_threshold_adjustment_ <= 0) {
+          // If the threshold is not already at the minimum
+          // modify and reset the ticks until next adjustment.
+          if (sampler_threshold_ > kSamplerThresholdMin) {
+            sampler_threshold_ -= kSamplerThresholdDelta;
+            sampler_ticks_until_threshold_adjustment_ =
+                kSamplerTicksBetweenThresholdAdjustment;
+          }
         }
       }
     }
 
-    if (function->IsMarkedForLazyRecompilation()) {
-      Code* unoptimized = function->shared()->code();
-      int nesting = unoptimized->allow_osr_at_loop_nesting_level();
+    SharedFunctionInfo* shared = function->shared();
+    Code* shared_code = shared->code();
+
+    if (shared_code->kind() != Code::FUNCTION) continue;
+
+    if (function->IsMarkedForLazyRecompilation() ||
+        function->IsMarkedForParallelRecompilation()) {
+      int nesting = shared_code->allow_osr_at_loop_nesting_level();
       if (nesting == 0) AttemptOnStackReplacement(function);
       int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
-      unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
+      shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
+    }
+
+    // Only record top-level code on top of the execution stack and
+    // avoid optimizing excessively large scripts since top-level code
+    // will be executed only once.
+    const int kMaxToplevelSourceSize = 10 * 1024;
+    if (shared->is_toplevel() &&
+        (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
+      continue;
     }
 
     // Do not record non-optimizable functions.
+    if (shared->optimization_disabled()) {
+      if (shared->deopt_count() >= FLAG_max_opt_count) {
+        // If optimization was disabled due to many deoptimizations,
+        // then check if the function is hot and try to reenable optimization.
+        int ticks = shared_code->profiler_ticks();
+        if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
+          shared_code->set_profiler_ticks(0);
+          shared->TryReenableOptimization();
+        } else {
+          shared_code->set_profiler_ticks(ticks + 1);
+        }
+      }
+      continue;
+    }
     if (!function->IsOptimizable()) continue;
-    samples[sample_count++] = function;
-
-    int function_size = function->shared()->SourceSize();
-    int threshold_size_factor = (function_size > kSizeLimit)
-        ? sampler_threshold_size_factor_
-        : 1;
-
-    int threshold = sampler_threshold_ * threshold_size_factor;
-
-    if (LookupSample(function) >= threshold) {
-      Optimize(function);
+
+    if (FLAG_watch_ic_patching) {
+      int ticks = shared_code->profiler_ticks();
+
+      if (ticks >= kProfilerTicksBeforeOptimization) {
+        int typeinfo, total, percentage;
+        GetICCounts(function, &typeinfo, &total, &percentage);
+        if (percentage >= FLAG_type_info_threshold) {
+          // If this particular function hasn't had any ICs patched for enough
+          // ticks, optimize it now.
+          Optimize(function, "hot and stable");
+        } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
+          Optimize(function, "not much type info but very hot");
+        } else {
+          shared_code->set_profiler_ticks(ticks + 1);
+          if (FLAG_trace_opt_verbose) {
+            PrintF("[not yet optimizing ");
+            function->PrintName();
+            PrintF(", not enough type info: %d/%d (%d%%)]\n",
+                   typeinfo, total, percentage);
+          }
+        }
+      } else if (!any_ic_changed_ &&
+                 shared_code->instruction_size() < kMaxSizeEarlyOpt) {
+        // If no IC was patched since the last tick and this function is very
+        // small, optimistically optimize it now.
+        Optimize(function, "small function");
+      } else {
+        shared_code->set_profiler_ticks(ticks + 1);
+      }
+    } else {  // !FLAG_watch_ic_patching
+      samples[sample_count++] = function;
+
+      int function_size = function->shared()->SourceSize();
+      int threshold_size_factor = (function_size > kSizeLimit)
+          ? sampler_threshold_size_factor_
+          : 1;
+
+      int threshold = sampler_threshold_ * threshold_size_factor;
+
+      if (LookupSample(function) >= threshold) {
+        Optimize(function, "sampler window lookup");
+      }
     }
   }
-
-  // Add the collected functions as samples. It's important not to do
-  // this as part of collecting them because this will interfere with
-  // the sample lookup in case of recursive functions.
-  for (int i = 0; i < sample_count; i++) {
-    AddSample(samples[i], kSamplerFrameWeight[i]);
+  if (FLAG_watch_ic_patching) {
+    any_ic_changed_ = false;
+  } else {  // !FLAG_watch_ic_patching
+    // Add the collected functions as samples. It's important not to do
+    // this as part of collecting them because this will interfere with
+    // the sample lookup in case of recursive functions.
+    for (int i = 0; i < sample_count; i++) {
+      AddSample(samples[i], kSamplerFrameWeight[i]);
+    }
   }
 }
 
 
 void RuntimeProfiler::NotifyTick() {
+  if (FLAG_count_based_interrupts) return;
   isolate_->stack_guard()->RequestRuntimeProfilerTick();
 }
 
 
-void RuntimeProfiler::Setup() {
-  ASSERT(has_been_globally_setup_);
-  ClearSampleBuffer();
+void RuntimeProfiler::SetUp() {
+  ASSERT(has_been_globally_set_up_);
+  if (!FLAG_watch_ic_patching) {
+    ClearSampleBuffer();
+  }
   // If the ticker hasn't already started, make sure to do so to get
   // the ticks for the runtime profiler.
   if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
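The FLAG_watch_ic_patching branch above is the core of the change: instead of the old sampler-window lookup, a function is optimized once it has accumulated enough ticks and its type feedback looks stable, or much later even without type feedback, or right away if it is tiny and no IC changed since the last tick. The standalone C++ sketch below distills only that decision; the threshold values marked as stand-ins are assumptions for illustration, not values taken from V8.

// Standalone distillation of the tick-based decision in the hunk above.
// Returned strings mirror the "reason" arguments passed to Optimize().
#include <cstdio>

const int kProfilerTicksBeforeOptimization = 2;
const int kTicksWhenNotEnoughTypeInfo = 100;
const int kTypeInfoThresholdPercent = 15;  // stand-in for FLAG_type_info_threshold
const int kMaxSizeEarlyOpt = 500;          // stand-in for 5 * kBackEdgeDistanceUnit

const char* Decide(int ticks, int type_info_percentage,
                   bool any_ic_changed, int instruction_size) {
  if (ticks >= kProfilerTicksBeforeOptimization) {
    if (type_info_percentage >= kTypeInfoThresholdPercent)
      return "hot and stable";
    if (ticks >= kTicksWhenNotEnoughTypeInfo)
      return "not much type info but very hot";
    return "keep waiting: not enough type info yet";
  }
  if (!any_ic_changed && instruction_size < kMaxSizeEarlyOpt)
    return "small function";
  return "keep waiting: not enough ticks yet";
}

int main() {
  std::printf("%s\n", Decide(3, 40, true, 4000));   // prints "hot and stable"
  std::printf("%s\n", Decide(120, 5, true, 4000));  // very hot, little type info
  std::printf("%s\n", Decide(0, 0, false, 200));    // tiny and quiet -> early opt
  return 0;
}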
@@ -255,10 +394,12 @@
 
 
 void RuntimeProfiler::Reset() {
-  sampler_threshold_ = kSamplerThresholdInit;
-  sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
-  sampler_ticks_until_threshold_adjustment_ =
-      kSamplerTicksBetweenThresholdAdjustment;
+  if (!FLAG_watch_ic_patching) {
+    sampler_threshold_ = kSamplerThresholdInit;
+    sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
+    sampler_ticks_until_threshold_adjustment_ =
+        kSamplerTicksBetweenThresholdAdjustment;
+  }
 }
 
 
@@ -295,7 +436,7 @@
   // undid the decrement done by the profiler thread. Increment again
   // to get the right count of active isolates.
   NoBarrier_AtomicIncrement(&state_, 1);
-  semaphore_->Signal();
+  semaphore.Pointer()->Signal();
 }
 
 
@@ -308,7 +449,7 @@
   Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
   ASSERT(old_state >= -1);
   if (old_state != 0) return false;
-  semaphore_->Wait();
+  semaphore.Pointer()->Wait();
   return true;
 }
 
@@ -324,7 +465,7 @@
   if (new_state == 0) {
     // The profiler thread is waiting. Wake it up. It must check for
     // stop conditions before attempting to wait again.
-    semaphore_->Signal();
+    semaphore.Pointer()->Signal();
   }
   thread->Join();
   // The profiler thread is now stopped. Undo the increment in case it
@@ -338,7 +479,8 @@
 void RuntimeProfiler::RemoveDeadSamples() {
   for (int i = 0; i < kSamplerWindowSize; i++) {
     Object* function = sampler_window_[i];
-    if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
+    if (function != NULL &&
+        !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
       sampler_window_[i] = NULL;
     }
   }