~ubuntu-branches/ubuntu/trusty/libv8/trusty

« back to all changes in this revision

Viewing changes to src/deoptimizer.cc

  • Committer: Package Import Robot
  • Author(s): Jérémy Lal
  • Date: 2012-02-20 14:08:17 UTC
  • mfrom: (15.1.24 sid)
  • Revision ID: package-import@ubuntu.com-20120220140817-bsvmeoa4sxsj5hbz
Tags: 3.7.12.22-3
Fix mipsel build, allow test debug-step-3 to fail (non-crucial)

Show diffs side-by-side

added

removed

Lines of Context:
52
52
 
53
53
DeoptimizerData::~DeoptimizerData() {
54
54
  if (eager_deoptimization_entry_code_ != NULL) {
55
 
    eager_deoptimization_entry_code_->Free(EXECUTABLE);
 
55
    Isolate::Current()->memory_allocator()->Free(
 
56
        eager_deoptimization_entry_code_);
56
57
    eager_deoptimization_entry_code_ = NULL;
57
58
  }
58
59
  if (lazy_deoptimization_entry_code_ != NULL) {
59
 
    lazy_deoptimization_entry_code_->Free(EXECUTABLE);
 
60
    Isolate::Current()->memory_allocator()->Free(
 
61
        lazy_deoptimization_entry_code_);
60
62
    lazy_deoptimization_entry_code_ = NULL;
61
63
  }
62
64
}
71
73
#endif
72
74
 
73
75
 
 
76
// We rely on this function not causing a GC.  It is called from generated code
 
77
// without having a real stack frame in place.
74
78
Deoptimizer* Deoptimizer::New(JSFunction* function,
75
79
                              BailoutType type,
76
80
                              unsigned bailout_id,
112
116
  // Get the function and code from the frame.
113
117
  JSFunction* function = JSFunction::cast(frame->function());
114
118
  Code* code = frame->LookupCode();
115
 
  Address code_start_address = code->instruction_start();
116
119
 
117
120
  // Locate the deoptimization point in the code. As we are at a call the
118
121
  // return address must be at a place in the code with deoptimization support.
119
 
  int deoptimization_index = Safepoint::kNoDeoptimizationIndex;
120
 
  // Scope this as the safe point constructor will disallow allocation.
121
 
  {
122
 
    SafepointTable table(code);
123
 
    for (unsigned i = 0; i < table.length(); ++i) {
124
 
      Address address = code_start_address + table.GetPcOffset(i);
125
 
      if (address == frame->pc()) {
126
 
        SafepointEntry safepoint_entry = table.GetEntry(i);
127
 
        ASSERT(safepoint_entry.deoptimization_index() !=
128
 
               Safepoint::kNoDeoptimizationIndex);
129
 
        deoptimization_index = safepoint_entry.deoptimization_index();
130
 
        break;
131
 
      }
132
 
    }
133
 
  }
 
122
  SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
 
123
  int deoptimization_index = safepoint_entry.deoptimization_index();
134
124
  ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
135
125
 
136
126
  // Always use the actual stack slots when calculating the fp to sp
274
264
  AssertNoAllocation no_allocation;
275
265
 
276
266
  // Run through the list of all global contexts and deoptimize.
277
 
  Object* global = Isolate::Current()->heap()->global_contexts_list();
278
 
  while (!global->IsUndefined()) {
279
 
    VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(),
280
 
                                              visitor);
281
 
    global = Context::cast(global)->get(Context::NEXT_CONTEXT_LINK);
 
267
  Object* context = Isolate::Current()->heap()->global_contexts_list();
 
268
  while (!context->IsUndefined()) {
 
269
    // GC can happen when the context is not fully initialized,
 
270
    // so the global field of the context can be undefined.
 
271
    Object* global = Context::cast(context)->get(Context::GLOBAL_INDEX);
 
272
    if (!global->IsUndefined()) {
 
273
      VisitAllOptimizedFunctionsForGlobalObject(JSObject::cast(global),
 
274
                                                visitor);
 
275
    }
 
276
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
282
277
  }
283
278
}
284
279
 
319
314
      input_(NULL),
320
315
      output_count_(0),
321
316
      output_(NULL),
 
317
      frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
 
318
      has_alignment_padding_(0),
322
319
      deferred_heap_numbers_(0) {
323
320
  if (FLAG_trace_deopt && type != OSR) {
324
321
    if (type == DEBUGGER) {
343
340
  if (type == EAGER) {
344
341
    ASSERT(from == NULL);
345
342
    optimized_code_ = function_->code();
 
343
    if (FLAG_trace_deopt && FLAG_code_comments) {
 
344
      // Print instruction associated with this bailout.
 
345
      const char* last_comment = NULL;
 
346
      int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
 
347
          | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
 
348
      for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
 
349
        RelocInfo* info = it.rinfo();
 
350
        if (info->rmode() == RelocInfo::COMMENT) {
 
351
          last_comment = reinterpret_cast<const char*>(info->data());
 
352
        }
 
353
        if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
 
354
          unsigned id = Deoptimizer::GetDeoptimizationId(
 
355
              info->target_address(), Deoptimizer::EAGER);
 
356
          if (id == bailout_id && last_comment != NULL) {
 
357
            PrintF("            %s\n", last_comment);
 
358
            break;
 
359
          }
 
360
        }
 
361
      }
 
362
    }
346
363
  } else if (type == LAZY) {
347
364
    optimized_code_ = FindDeoptimizingCodeFromAddress(from);
348
365
    ASSERT(optimized_code_ != NULL);
386
403
Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
387
404
  ASSERT(id >= 0);
388
405
  if (id >= kNumberOfEntries) return NULL;
389
 
  LargeObjectChunk* base = NULL;
 
406
  MemoryChunk* base = NULL;
390
407
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
391
408
  if (type == EAGER) {
392
409
    if (data->eager_deoptimization_entry_code_ == NULL) {
400
417
    base = data->lazy_deoptimization_entry_code_;
401
418
  }
402
419
  return
403
 
      static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
 
420
      static_cast<Address>(base->body()) + (id * table_entry_size_);
404
421
}
405
422
 
406
423
 
407
424
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
408
 
  LargeObjectChunk* base = NULL;
 
425
  MemoryChunk* base = NULL;
409
426
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
410
427
  if (type == EAGER) {
411
428
    base = data->eager_deoptimization_entry_code_;
413
430
    base = data->lazy_deoptimization_entry_code_;
414
431
  }
415
432
  if (base == NULL ||
416
 
      addr < base->GetStartAddress() ||
417
 
      addr >= base->GetStartAddress() +
 
433
      addr < base->body() ||
 
434
      addr >= base->body() +
418
435
          (kNumberOfEntries * table_entry_size_)) {
419
436
    return kNotDeoptimizationEntry;
420
437
  }
421
438
  ASSERT_EQ(0,
422
 
      static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
423
 
  return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
 
439
      static_cast<int>(addr - base->body()) % table_entry_size_);
 
440
  return static_cast<int>(addr - base->body()) / table_entry_size_;
424
441
}
425
442
 
426
443
 
462
479
}
463
480
 
464
481
 
 
482
// We rely on this function not causing a GC.  It is called from generated code
 
483
// without having a real stack frame in place.
465
484
void Deoptimizer::DoComputeOutputFrames() {
466
485
  if (bailout_type_ == OSR) {
467
486
    DoComputeOsrOutputFrame();
613
632
      intptr_t input_value = input_->GetRegister(input_reg);
614
633
      if (FLAG_trace_deopt) {
615
634
        PrintF(
616
 
            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
 
635
            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
617
636
            output_[frame_index]->GetTop() + output_offset,
618
637
            output_offset,
619
638
            input_value,
620
639
            converter.NameOfCPURegister(input_reg));
 
640
        reinterpret_cast<Object*>(input_value)->ShortPrint();
 
641
        PrintF("\n");
621
642
      }
622
643
      output_[frame_index]->SetFrameSlot(output_offset, input_value);
623
644
      return;
675
696
      if (FLAG_trace_deopt) {
676
697
        PrintF("    0x%08" V8PRIxPTR ": ",
677
698
               output_[frame_index]->GetTop() + output_offset);
678
 
        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
 
699
        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
679
700
               output_offset,
680
701
               input_value,
681
702
               input_offset);
 
703
        reinterpret_cast<Object*>(input_value)->ShortPrint();
 
704
        PrintF("\n");
682
705
      }
683
706
      output_[frame_index]->SetFrameSlot(output_offset, input_value);
684
707
      return;
850
873
      unsigned output_offset =
851
874
          output->GetOffsetFromSlotIndex(this, output_index);
852
875
      if (FLAG_trace_osr) {
853
 
        PrintF("    [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
 
876
        PrintF("    [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
854
877
               output_offset,
855
878
               input_value,
856
879
               *input_offset);
 
880
        reinterpret_cast<Object*>(input_value)->ShortPrint();
 
881
        PrintF("\n");
857
882
      }
858
883
      output->SetFrameSlot(output_offset, input_value);
859
884
      break;
953
978
  for (uint32_t i = 0; i < table_length; ++i) {
954
979
    uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
955
980
    Address pc_after = unoptimized_code->instruction_start() + pc_offset;
956
 
    PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
 
981
    PatchStackCheckCodeAt(unoptimized_code,
 
982
                          pc_after,
 
983
                          check_code,
 
984
                          replacement_code);
957
985
    stack_check_cursor += 2 * kIntSize;
958
986
  }
959
987
}
972
1000
  for (uint32_t i = 0; i < table_length; ++i) {
973
1001
    uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
974
1002
    Address pc_after = unoptimized_code->instruction_start() + pc_offset;
975
 
    RevertStackCheckCodeAt(pc_after, check_code, replacement_code);
 
1003
    RevertStackCheckCodeAt(unoptimized_code,
 
1004
                           pc_after,
 
1005
                           check_code,
 
1006
                           replacement_code);
976
1007
    stack_check_cursor += 2 * kIntSize;
977
1008
  }
978
1009
}
1039
1070
}
1040
1071
 
1041
1072
 
1042
 
LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
 
1073
MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
1043
1074
  // We cannot run this if the serializer is enabled because this will
1044
1075
  // cause us to emit relocation information for the external
1045
1076
  // references. This is fine because the deoptimizer's code section
1053
1084
  masm.GetCode(&desc);
1054
1085
  ASSERT(desc.reloc_size == 0);
1055
1086
 
1056
 
  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
 
1087
  MemoryChunk* chunk =
 
1088
      Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
 
1089
                                                            EXECUTABLE,
 
1090
                                                            NULL);
1057
1091
  if (chunk == NULL) {
1058
1092
    V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
1059
1093
  }
1060
 
  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
1061
 
  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
 
1094
  memcpy(chunk->body(), desc.buffer, desc.instr_size);
 
1095
  CPU::FlushICache(chunk->body(), desc.instr_size);
1062
1096
  return chunk;
1063
1097
}
1064
1098